Drop Python 3.10
M Bussonnier
@@ -1,73 +1,73 @@
1 1 name: Run Downstream tests
2 2
3 3 on:
4 4 push:
5 5 pull_request:
6 6 # Run weekly on Monday at 1:23 UTC
7 7 schedule:
8 8 - cron: '23 1 * * 1'
9 9 workflow_dispatch:
10 10
11 11 permissions:
12 12 contents: read
13 13
14 14 jobs:
15 15 test:
16 16 runs-on: ${{ matrix.os }}
17 17 # Disable scheduled CI runs on forks
18 18 if: github.event_name != 'schedule' || github.repository_owner == 'ipython'
19 19 strategy:
20 20 matrix:
21 21 os: [ubuntu-latest]
22 python-version: ["3.10"]
22 python-version: ["3.13"]
23 23 include:
24 24 - os: macos-13
25 python-version: "3.10"
25 python-version: "3.13"
26 26
27 27 steps:
28 28 - uses: actions/checkout@v4
29 29 - name: Set up Python ${{ matrix.python-version }}
30 30 uses: actions/setup-python@v5
31 31 with:
32 32 python-version: ${{ matrix.python-version }}
33 33 - name: Update Python installer
34 34 run: |
35 35 python -m pip install --upgrade pip setuptools wheel
36 36 - name: Install ipykernel
37 37 run: |
38 38 cd ..
39 39 git clone https://github.com/ipython/ipykernel
40 40 cd ipykernel
41 41 pip install -e .[test]
42 42 cd ..
43 43 - name: Install and update Python dependencies
44 44 run: |
45 45 python -m pip install --upgrade -e file://$PWD#egg=ipython[test]
46 46 # we must install IPython after ipykernel to get the right versions.
47 47 python -m pip install --upgrade --upgrade-strategy eager flaky ipyparallel
48 48 - name: pytest ipykernel
49 49 env:
50 50 COLUMNS: 120
51 51 run: |
52 52 cd ../ipykernel
53 53 pytest
54 54 - name: Install sagemath-repl
55 55 run: |
56 56 # Sept 2024, sage has been failing for a while,
57 57 # Skipping.
58 58 # cd ..
59 59 # git clone --depth 1 https://github.com/sagemath/sage
60 60 # cd sage
61 61 # # We cloned it for the tests, but for simplicity we install the
62 62 # # wheels from PyPI.
63 63 # # (Avoid 10.3b6 because of https://github.com/sagemath/sage/pull/37178)
64 64 # pip install --pre sagemath-repl sagemath-environment
65 65 # # Install optionals that make more tests pass
66 66 # pip install pillow
67 67 # pip install --pre sagemath-categories
68 68 # cd ..
69 69 - name: Test sagemath-repl
70 70 run: |
71 71 # cd ../sage/
72 72 # # From https://github.com/sagemath/sage/blob/develop/pkgs/sagemath-repl/tox.ini
73 73 # sage-runtests -p --environment=sage.all__sagemath_repl --baseline-stats-path=pkgs/sagemath-repl/known-test-failures.json --initial --optional=sage src/sage/repl src/sage/doctest src/sage/misc/sage_input.py src/sage/misc/sage_eval.py
@@ -1,34 +1,34 @@
1 1 name: Nightly Wheel builder
2 2 on:
3 3 workflow_dispatch:
4 4 schedule:
5 5 # this cron runs every Sunday at midnight UTC
6 6 - cron: '0 0 * * 0'
7 7
8 8 jobs:
9 9 upload_anaconda:
10 10 name: Upload to Anaconda
11 11 runs-on: ubuntu-latest
12 12 # The artifacts cannot be uploaded on PRs, also disable scheduled CI runs on forks
13 13 if: github.event_name != 'pull_request' && (github.event_name != 'schedule' || github.repository_owner == 'ipython')
14 14
15 15 steps:
16 16 - uses: actions/checkout@v4
17 17 - name: Set up Python
18 18 uses: actions/setup-python@v5
19 19 with:
20 python-version: "3.10"
20 python-version: "3.13"
21 21 cache: pip
22 22 cache-dependency-path: |
23 23 pyproject.toml
24 24 - name: Try building with Python build
25 25 if: runner.os != 'Windows' # setup.py does not support sdist on Windows
26 26 run: |
27 27 python -m pip install build
28 28 python -m build
29 29
30 30 - name: Upload wheel
31 31 uses: scientific-python/upload-nightly-action@main
32 32 with:
33 33 artifacts_path: dist
34 34 anaconda_nightly_upload_token: ${{secrets.UPLOAD_TOKEN}}
@@ -1,109 +1,106 @@
1 1 name: Run tests
2 2
3 3 on:
4 4 push:
5 5 branches:
6 6 - main
7 7 - '*.x'
8 8 pull_request:
9 9 # Run weekly on Monday at 1:23 UTC
10 10 schedule:
11 11 - cron: '23 1 * * 1'
12 12 workflow_dispatch:
13 13
14 14
15 15 jobs:
16 16 test:
17 17 runs-on: ${{ matrix.os }}
18 18 # Disable scheduled CI runs on forks
19 19 if: github.event_name != 'schedule' || github.repository_owner == 'ipython'
20 20 strategy:
21 21 fail-fast: false
22 22 matrix:
23 23 os: [ubuntu-latest, windows-latest]
24 python-version: ["3.10", "3.11", "3.12","3.13"]
24 python-version: ["3.11", "3.12","3.13"]
25 25 deps: [test_extra]
26 26 # Test all on ubuntu, test ends on macos
27 27 include:
28 28 - os: macos-latest
29 python-version: "3.10"
30 deps: test_extra
31 - os: macos-latest
32 29 python-version: "3.11"
33 30 deps: test_extra
34 31 # Tests minimal dependencies set
35 32 - os: ubuntu-latest
36 33 python-version: "3.11"
37 34 deps: test
38 35 # Tests latest development Python version
39 36 - os: ubuntu-latest
40 37 python-version: "3.13"
41 38 deps: test
42 39 # Installing optional dependencies stuff takes ages on PyPy
43 40 - os: ubuntu-latest
44 python-version: "pypy-3.10"
41 python-version: "pypy-3.11"
45 42 deps: test
46 43 - os: windows-latest
47 python-version: "pypy-3.10"
44 python-version: "pypy-3.11"
48 45 deps: test
49 46 - os: macos-latest
50 python-version: "pypy-3.10"
47 python-version: "pypy-3.11"
51 48 deps: test
52 49 # Temporary CI run to use entry point compatible code in matplotlib-inline.
53 50 - os: ubuntu-latest
54 51 python-version: "3.12"
55 52 deps: test_extra
56 53 want-latest-entry-point-code: true
57 54
58 55 steps:
59 56 - uses: actions/checkout@v4
60 57 - name: Set up Python ${{ matrix.python-version }}
61 58 uses: actions/setup-python@v5
62 59 with:
63 60 python-version: ${{ matrix.python-version }}
64 61 cache: pip
65 62 cache-dependency-path: |
66 63 pyproject.toml
67 64 - name: Install latex
68 65 if: runner.os == 'Linux' && matrix.deps == 'test_extra'
69 66 run: echo "disable latex for now, issues in mirros" #sudo apt-get -yq -o Acquire::Retries=3 --no-install-suggests --no-install-recommends install texlive dvipng
70 67 - name: Install and update Python dependencies (binary only)
71 68 if: ${{ ! contains( matrix.python-version, 'dev' ) }}
72 69 run: |
73 70 python -m pip install --only-binary ':all:' --upgrade pip setuptools wheel build
74 71 python -m pip install --only-binary ':all:' --no-binary curio --upgrade -e .[${{ matrix.deps }}]
75 72 python -m pip install --only-binary ':all:' --upgrade check-manifest pytest-cov 'pytest<8'
76 73 - name: Install and update Python dependencies (dev?)
77 74 if: ${{ contains( matrix.python-version, 'dev' ) }}
78 75 run: |
79 76 python -m pip install --pre --upgrade pip setuptools wheel build
80 77 python -m pip install --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple --no-binary curio --upgrade -e .[${{ matrix.deps }}]
81 78 python -m pip install --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple --upgrade check-manifest pytest-cov
82 79 - name: Try building with Python build
83 80 if: runner.os != 'Windows' # setup.py does not support sdist on Windows
84 81 run: |
85 82 python -m build
86 83 shasum -a 256 dist/*
87 84 - name: Check manifest
88 85 if: runner.os != 'Windows' # setup.py does not support sdist on Windows
89 86 run: check-manifest
90 87
91 88 - name: Install entry point compatible code (TEMPORARY)
92 89 if: matrix.want-latest-entry-point-code
93 90 run: |
94 91 python -m pip list
95 92 # Not installing matplotlib's entry point code as building matplotlib from source is complex.
96 93 # Rely upon matplotlib to test all the latest entry point branches together.
97 94 python -m pip install --upgrade git+https://github.com/ipython/matplotlib-inline.git@main
98 95 python -m pip list
99 96
100 97 - name: pytest
101 98 env:
102 99 COLUMNS: 120
103 100 run: |
104 101 pytest --color=yes -raXxs ${{ startsWith(matrix.python-version, 'pypy') && ' ' || '--cov --cov-report=xml' }} --maxfail=15
105 102 - name: Upload coverage to Codecov
106 103 uses: codecov/codecov-action@v4
107 104 with:
108 105 name: Test
109 106 files: /home/runner/work/ipython/ipython/coverage.xml
@@ -1,163 +1,164 @@
1 1 # PYTHON_ARGCOMPLETE_OK
2 2 """
3 3 IPython: tools for interactive and parallel computing in Python.
4 4
5 5 https://ipython.org
6 6 """
7 7 #-----------------------------------------------------------------------------
8 8 # Copyright (c) 2008-2011, IPython Development Team.
9 9 # Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
10 10 # Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
11 11 # Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
12 12 #
13 13 # Distributed under the terms of the Modified BSD License.
14 14 #
15 15 # The full license is in the file COPYING.txt, distributed with this software.
16 16 #-----------------------------------------------------------------------------
17 17
18 18 #-----------------------------------------------------------------------------
19 19 # Imports
20 20 #-----------------------------------------------------------------------------
21 21
22 22 import sys
23 23
24 24 #-----------------------------------------------------------------------------
25 25 # Setup everything
26 26 #-----------------------------------------------------------------------------
27 27
28 28 # Don't forget to also update setup.py when this changes!
29 if sys.version_info < (3, 10):
29 if sys.version_info < (3, 11):
30 30 raise ImportError(
31 31 """
32 IPython 8.31+ supports Python 3.11 and above, following SPEC0.
32 33 IPython 8.19+ supports Python 3.10 and above, following SPEC0.
33 34 IPython 8.13+ supports Python 3.9 and above, following NEP 29.
34 35 IPython 8.0-8.12 supports Python 3.8 and above, following NEP 29.
35 36 When using Python 2.7, please install IPython 5.x LTS Long Term Support version.
36 37 Python 3.3 and 3.4 were supported up to IPython 6.x.
37 38 Python 3.5 was supported with IPython 7.0 to 7.9.
38 39 Python 3.6 was supported with IPython up to 7.16.
39 40 Python 3.7 was still supported with the 7.x branch.
40 41
41 42 See IPython `README.rst` file for more information:
42 43
43 44 https://github.com/ipython/ipython/blob/main/README.rst
44 45
45 46 """
46 47 )
47 48
48 49 #-----------------------------------------------------------------------------
49 50 # Setup the top level names
50 51 #-----------------------------------------------------------------------------
51 52
52 53 from .core.getipython import get_ipython
53 54 from .core import release
54 55 from .core.application import Application
55 56 from .terminal.embed import embed
56 57
57 58 from .core.interactiveshell import InteractiveShell
58 59 from .utils.sysinfo import sys_info
59 60 from .utils.frame import extract_module_locals
60 61
61 62 __all__ = ["start_ipython", "embed", "start_kernel", "embed_kernel"]
62 63
63 64 # Release data
64 65 __author__ = '%s <%s>' % (release.author, release.author_email)
65 66 __license__ = release.license
66 67 __version__ = release.version
67 68 version_info = release.version_info
68 69 # list of CVEs that should have been patched in this release.
69 70 # this is informational and should not be relied upon.
70 71 __patched_cves__ = {"CVE-2022-21699", "CVE-2023-24816"}
71 72
72 73
73 74 def embed_kernel(module=None, local_ns=None, **kwargs):
74 75 """Embed and start an IPython kernel in a given scope.
75 76
76 77 If you don't want the kernel to initialize the namespace
77 78 from the scope of the surrounding function,
78 79 and/or you want to load full IPython configuration,
79 80 you probably want `IPython.start_kernel()` instead.
80 81
81 82 Parameters
82 83 ----------
83 84 module : types.ModuleType, optional
84 85 The module to load into IPython globals (default: caller)
85 86 local_ns : dict, optional
86 87 The namespace to load into IPython user namespace (default: caller)
87 88 **kwargs : various, optional
88 89 Further keyword args are relayed to the IPKernelApp constructor,
89 90 such as `config`, a traitlets :class:`Config` object (see :ref:`configure_start_ipython`),
90 91 allowing configuration of the kernel (see :ref:`kernel_options`). Will only have an effect
91 92 on the first embed_kernel call for a given process.
92 93 """
93 94
94 95 (caller_module, caller_locals) = extract_module_locals(1)
95 96 if module is None:
96 97 module = caller_module
97 98 if local_ns is None:
98 99 local_ns = caller_locals
99 100
100 101 # Only import .zmq when we really need it
101 102 from ipykernel.embed import embed_kernel as real_embed_kernel
102 103 real_embed_kernel(module=module, local_ns=local_ns, **kwargs)
103 104
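A minimal usage sketch of the API documented above, assuming a hypothetical script that wants to expose its local state (the function name and variable are invented for illustration)::

    import IPython

    def compute():
        result = 42  # hypothetical local state to inspect
        # Start a kernel that sees this function's locals; a frontend can then
        # attach to it, for example with `jupyter console --existing`.
        IPython.embed_kernel(local_ns=locals())

    compute()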
104 105 def start_ipython(argv=None, **kwargs):
105 106 """Launch a normal IPython instance (as opposed to embedded)
106 107
107 108 `IPython.embed()` puts a shell in a particular calling scope,
108 109 such as a function or method for debugging purposes,
109 110 which is often not desirable.
110 111
111 112 `start_ipython()` does full, regular IPython initialization,
112 113 including loading startup files, configuration, etc.
113 114 much of which is skipped by `embed()`.
114 115
115 116 This is a public API method, and will survive implementation changes.
116 117
117 118 Parameters
118 119 ----------
119 120 argv : list or None, optional
120 121 If unspecified or None, IPython will parse command-line options from sys.argv.
121 122 To prevent any command-line parsing, pass an empty list: `argv=[]`.
122 123 user_ns : dict, optional
123 124 specify this dictionary to initialize the IPython user namespace with particular values.
124 125 **kwargs : various, optional
125 126 Any other kwargs will be passed to the Application constructor,
126 127 such as `config`, a traitlets :class:`Config` object (see :ref:`configure_start_ipython`),
127 128 allowing configuration of the instance (see :ref:`terminal_options`).
128 129 """
129 130 from IPython.terminal.ipapp import launch_new_instance
130 131 return launch_new_instance(argv=argv, **kwargs)
131 132
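A short, hedged usage sketch of ``start_ipython`` (the namespace contents are illustrative only)::

    import IPython

    # Launch a full IPython session without parsing the real command line,
    # pre-seeding the user namespace.
    IPython.start_ipython(argv=[], user_ns={"answer": 42})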
132 133 def start_kernel(argv=None, **kwargs):
133 134 """Launch a normal IPython kernel instance (as opposed to embedded)
134 135
135 136 `IPython.embed_kernel()` puts a shell in a particular calling scope,
136 137 such as a function or method for debugging purposes,
137 138 which is often not desirable.
138 139
139 140 `start_kernel()` does full, regular IPython initialization,
140 141 including loading startup files, configuration, etc.
141 142 much of which is skipped by `embed_kernel()`.
142 143
143 144 Parameters
144 145 ----------
145 146 argv : list or None, optional
146 147 If unspecified or None, IPython will parse command-line options from sys.argv.
147 148 To prevent any command-line parsing, pass an empty list: `argv=[]`.
148 149 user_ns : dict, optional
149 150 specify this dictionary to initialize the IPython user namespace with particular values.
150 151 **kwargs : various, optional
151 152 Any other kwargs will be passed to the Application constructor,
152 153 such as `config`, a traitlets :class:`Config` object (see :ref:`configure_start_ipython`),
153 154 allowing configuration of the kernel (see :ref:`kernel_options`).
154 155 """
155 156 import warnings
156 157
157 158 warnings.warn(
158 159 "start_kernel is deprecated since IPython 8.0, use from `ipykernel.kernelapp.launch_new_instance`",
159 160 DeprecationWarning,
160 161 stacklevel=2,
161 162 )
162 163 from ipykernel.kernelapp import launch_new_instance
163 164 return launch_new_instance(argv=argv, **kwargs)
@@ -1,3389 +1,3378 @@
1 1 """Completion for IPython.
2 2
3 3 This module started as a fork of the rlcompleter module in the Python standard
4 4 library. The original enhancements made to rlcompleter have been sent
5 5 upstream and were accepted as of Python 2.3.
6 6
7 7 This module now supports a wide variety of completion mechanisms, available
8 8 both for normal classic Python code and for IPython-specific
9 9 syntax such as magics.
10 10
11 11 Latex and Unicode completion
12 12 ============================
13 13
14 14 IPython and compatible frontends can not only complete your code, but can also
15 15 help you to input a wide range of characters. In particular we allow you to insert
16 16 a unicode character using the tab completion mechanism.
17 17
18 18 Forward latex/unicode completion
19 19 --------------------------------
20 20
21 21 Forward completion allows you to easily type a unicode character using its latex
22 22 name, or unicode long description. To do so, type a backslash followed by the
23 23 relevant name and press tab:
24 24
25 25
26 26 Using latex completion:
27 27
28 28 .. code::
29 29
30 30 \\alpha<tab>
31 31 α
32 32
33 33 or using unicode completion:
34 34
35 35
36 36 .. code::
37 37
38 38 \\GREEK SMALL LETTER ALPHA<tab>
39 39 α
40 40
41 41
42 42 Only valid Python identifiers will complete. Combining characters (like arrows or
43 43 dots) are also available; unlike latex they need to be put after their
44 44 counterpart, that is to say, ``F\\\\vec<tab>`` is correct, not ``\\\\vec<tab>F``.
45 45
46 46 Some browsers are known to display combining characters incorrectly.
47 47
48 48 Backward latex completion
49 49 -------------------------
50 50
51 51 It is sometimes challenging to know how to type a character. If you are using
52 52 IPython, or any compatible frontend, you can prepend a backslash to the character
53 53 and press :kbd:`Tab` to expand it to its latex form.
54 54
55 55 .. code::
56 56
57 57 \\α<tab>
58 58 \\alpha
59 59
60 60
61 61 Both forward and backward completions can be deactivated by setting the
62 62 :std:configtrait:`Completer.backslash_combining_completions` option to
63 63 ``False``.
64 64
65 65
66 66 Experimental
67 67 ============
68 68
69 69 Starting with IPython 6.0, this module can make use of the Jedi library to
70 70 generate completions both using static analysis of the code, and dynamically
71 71 inspecting multiple namespaces. Jedi is an autocompletion and static analysis
72 72 library for Python. The APIs attached to this new mechanism are unstable and will
73 73 raise unless used in a :any:`provisionalcompleter` context manager.
74 74
75 75 You will find that the following are experimental:
76 76
77 77 - :any:`provisionalcompleter`
78 78 - :any:`IPCompleter.completions`
79 79 - :any:`Completion`
80 80 - :any:`rectify_completions`
81 81
82 82 .. note::
83 83
84 84 better name for :any:`rectify_completions` ?
85 85
86 86 We welcome any feedback on these new APIs, and we also encourage you to try this
87 87 module in debug mode (start IPython with ``--Completer.debug=True``) in order
88 88 to have extra logging information if :any:`jedi` is crashing, or if current
89 89 IPython completer pending deprecations are returning results not yet handled
90 90 by :any:`jedi`.
91 91
92 92 Using Jedi for tab completion allows snippets like the following to work without
93 93 having to execute any code:
94 94
95 95 >>> myvar = ['hello', 42]
96 96 ... myvar[1].bi<tab>
97 97
98 98 Tab completion will be able to infer that ``myvar[1]`` is a real number
99 99 almost without executing any code, unlike the deprecated :any:`IPCompleter.greedy`
100 100 option.
101 101
102 102 Be sure to update :any:`jedi` to the latest stable version or to try the
103 103 current development version to get better completions.
104 104
105 105 Matchers
106 106 ========
107 107
108 108 All completion routines are implemented using the unified *Matchers* API.
109 109 The matchers API is provisional and subject to change without notice.
110 110
111 111 The built-in matchers include:
112 112
113 113 - :any:`IPCompleter.dict_key_matcher`: dictionary key completions,
114 114 - :any:`IPCompleter.magic_matcher`: completions for magics,
115 115 - :any:`IPCompleter.unicode_name_matcher`,
116 116 :any:`IPCompleter.fwd_unicode_matcher`
117 117 and :any:`IPCompleter.latex_name_matcher`: see `Forward latex/unicode completion`_,
118 118 - :any:`back_unicode_name_matcher` and :any:`back_latex_name_matcher`: see `Backward latex completion`_,
119 119 - :any:`IPCompleter.file_matcher`: paths to files and directories,
120 120 - :any:`IPCompleter.python_func_kw_matcher` - function keywords,
121 121 - :any:`IPCompleter.python_matches` - globals and attributes (v1 API),
122 122 - ``IPCompleter.jedi_matcher`` - static analysis with Jedi,
123 123 - :any:`IPCompleter.custom_completer_matcher` - pluggable completer with a default
124 124 implementation in :any:`InteractiveShell` which uses IPython hooks system
125 125 (`complete_command`) with string dispatch (including regular expressions).
126 126 Unlike other matchers, ``custom_completer_matcher`` will not suppress
127 127 Jedi results to match behaviour in earlier IPython versions.
128 128
129 129 Custom matchers can be added by appending to ``IPCompleter.custom_matchers`` list.
130 130
131 131 Matcher API
132 132 -----------
133 133
134 134 Simplifying some details, the ``Matcher`` interface can be described as
135 135
136 136 .. code-block::
137 137
138 138 MatcherAPIv1 = Callable[[str], list[str]]
139 139 MatcherAPIv2 = Callable[[CompletionContext], SimpleMatcherResult]
140 140
141 141 Matcher = MatcherAPIv1 | MatcherAPIv2
142 142
143 143 The ``MatcherAPIv1`` reflects the matcher API as available prior to IPython 8.6.0
144 144 and remains supported as the simplest way of generating completions. This is also
145 145 currently the only API supported by the IPython hooks system `complete_command`.
146 146
147 147 To distinguish between matcher versions ``matcher_api_version`` attribute is used.
148 148 More precisely, the API allows omitting ``matcher_api_version`` for v1 Matchers,
149 149 and requires a literal ``2`` for v2 Matchers.
150 150
151 151 Once the API stabilises, future versions may relax the requirement for specifying
152 152 ``matcher_api_version`` by switching to :any:`functools.singledispatch`, therefore
153 153 please do not rely on the presence of ``matcher_api_version`` for any purposes.
154 154
155 155 Suppression of competing matchers
156 156 ---------------------------------
157 157
158 158 By default results from all matchers are combined, in the order determined by
159 159 their priority. Matchers can request to suppress results from subsequent
160 160 matchers by setting ``suppress`` to ``True`` in the ``MatcherResult``.
161 161
162 162 When multiple matchers simultaneously request suppression, the results from
163 163 the matcher with the higher priority will be returned.
164 164
165 165 Sometimes it is desirable to suppress most but not all other matchers;
166 166 this can be achieved by adding a set of identifiers of matchers which
167 167 should not be suppressed to ``MatcherResult`` under ``do_not_suppress`` key.
168 168
169 169 The suppression behaviour is user-configurable via
170 170 :std:configtrait:`IPCompleter.suppress_competing_matchers`.
171 171 """
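As a minimal sketch of the suppression keys described above, a hypothetical v2 matcher could look like the following (the name ``exclusive_matcher`` and its completions are invented; ``context_matcher`` and ``SimpleCompletion`` are defined later in this module)::

    from IPython.core.completer import context_matcher, SimpleCompletion

    @context_matcher()
    def exclusive_matcher(context):
        """Hypothetical matcher that hides results from competing matchers."""
        return {
            "completions": [SimpleCompletion("only_this", type="keyword")],
            # Suppress every other matcher...
            "suppress": True,
            # ...except the built-in file matcher.
            "do_not_suppress": {"IPCompleter.file_matcher"},
        }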
172 172
173 173
174 174 # Copyright (c) IPython Development Team.
175 175 # Distributed under the terms of the Modified BSD License.
176 176 #
177 177 # Some of this code originated from rlcompleter in the Python standard library
178 178 # Copyright (C) 2001 Python Software Foundation, www.python.org
179 179
180 180 from __future__ import annotations
181 181 import builtins as builtin_mod
182 182 import enum
183 183 import glob
184 184 import inspect
185 185 import itertools
186 186 import keyword
187 187 import os
188 188 import re
189 189 import string
190 190 import sys
191 191 import tokenize
192 192 import time
193 193 import unicodedata
194 194 import uuid
195 195 import warnings
196 196 from ast import literal_eval
197 197 from collections import defaultdict
198 198 from contextlib import contextmanager
199 199 from dataclasses import dataclass
200 200 from functools import cached_property, partial
201 201 from types import SimpleNamespace
202 202 from typing import (
203 203 Iterable,
204 204 Iterator,
205 205 List,
206 206 Tuple,
207 207 Union,
208 208 Any,
209 209 Sequence,
210 210 Dict,
211 211 Optional,
212 212 TYPE_CHECKING,
213 213 Set,
214 214 Sized,
215 215 TypeVar,
216 216 Literal,
217 217 )
218 218
219 219 from IPython.core.guarded_eval import guarded_eval, EvaluationContext
220 220 from IPython.core.error import TryNext
221 221 from IPython.core.inputtransformer2 import ESC_MAGIC
222 222 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
223 223 from IPython.core.oinspect import InspectColors
224 224 from IPython.testing.skipdoctest import skip_doctest
225 225 from IPython.utils import generics
226 226 from IPython.utils.decorators import sphinx_options
227 227 from IPython.utils.dir2 import dir2, get_real_method
228 228 from IPython.utils.docs import GENERATING_DOCUMENTATION
229 229 from IPython.utils.path import ensure_dir_exists
230 230 from IPython.utils.process import arg_split
231 231 from traitlets import (
232 232 Bool,
233 233 Enum,
234 234 Int,
235 235 List as ListTrait,
236 236 Unicode,
237 237 Dict as DictTrait,
238 238 Union as UnionTrait,
239 239 observe,
240 240 )
241 241 from traitlets.config.configurable import Configurable
242 242
243 243 import __main__
244 244
245 from typing import cast
246 from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias, TypeGuard
247
248
245 249 # skip module doctests
246 250 __skip_doctest__ = True
247 251
248 252
249 253 try:
250 254 import jedi
251 255 jedi.settings.case_insensitive_completion = False
252 256 import jedi.api.helpers
253 257 import jedi.api.classes
254 258 JEDI_INSTALLED = True
255 259 except ImportError:
256 260 JEDI_INSTALLED = False
257 261
258 262
259 if TYPE_CHECKING or GENERATING_DOCUMENTATION and sys.version_info >= (3, 11):
260 from typing import cast
261 from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias, TypeGuard
262 else:
263 from typing import Generic
264
265 def cast(type_, obj):
266 """Workaround for `TypeError: MatcherAPIv2() takes no arguments`"""
267 return obj
268 263
269 # do not require on runtime
270 NotRequired = Tuple # requires Python >=3.11
271 TypedDict = Dict # by extension of `NotRequired` requires 3.11 too
272 Protocol = object # requires Python >=3.8
273 TypeAlias = Any # requires Python >=3.10
274 TypeGuard = Generic # requires Python >=3.10
275 264 if GENERATING_DOCUMENTATION:
276 265 from typing import TypedDict
277 266
278 267 # -----------------------------------------------------------------------------
279 268 # Globals
280 269 #-----------------------------------------------------------------------------
281 270
282 271 # Ranges where we have most of the valid unicode names. We could be finer
283 272 # grained, but is it worth it for performance? While unicode has characters in the
284 273 # range 0, 0x110000, we seem to have names for only about 10% of those (131808 as I
285 274 # write this). With the ranges below we cover them all, with a density of ~67%;
286 275 # the biggest next gap we could consider only adds about 1% density and there are 600
287 276 # gaps that would need hard coding.
288 277 _UNICODE_RANGES = [(32, 0x323B0), (0xE0001, 0xE01F0)]
289 278
290 279 # Public API
291 280 __all__ = ["Completer", "IPCompleter"]
292 281
293 282 if sys.platform == 'win32':
294 283 PROTECTABLES = ' '
295 284 else:
296 285 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
297 286
298 287 # Protect against returning an enormous number of completions which the frontend
299 288 # may have trouble processing.
300 289 MATCHES_LIMIT = 500
301 290
302 291 # Completion type reported when no type can be inferred.
303 292 _UNKNOWN_TYPE = "<unknown>"
304 293
305 294 # sentinel value to signal lack of a match
306 295 not_found = object()
307 296
308 297 class ProvisionalCompleterWarning(FutureWarning):
309 298 """
310 299 Exception raised by an experimental feature in this module.
311 300
312 301 Wrap code in :any:`provisionalcompleter` context manager if you
313 302 are certain you want to use an unstable feature.
314 303 """
315 304 pass
316 305
317 306 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
318 307
319 308
320 309 @skip_doctest
321 310 @contextmanager
322 311 def provisionalcompleter(action='ignore'):
323 312 """
324 313 This context manager has to be used in any place where unstable completer
325 314 behavior and API may be called.
326 315
327 316 >>> with provisionalcompleter():
328 317 ... completer.do_experimental_things() # works
329 318
330 319 >>> completer.do_experimental_things() # raises.
331 320
332 321 .. note::
333 322
334 323 Unstable
335 324
336 325 By using this context manager you agree that the API in use may change
337 326 without warning, and that you won't complain if it does so.
338 327
339 328 You also understand that, if the API is not to your liking, you should report
340 329 a bug to explain your use case upstream.
341 330
342 331 We'll be happy to get your feedback, feature requests, and improvements on
343 332 any of the unstable APIs!
344 333 """
345 334 with warnings.catch_warnings():
346 335 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
347 336 yield
348 337
349 338
350 339 def has_open_quotes(s):
351 340 """Return whether a string has open quotes.
352 341
353 342 This simply checks whether the number of quote characters of either type in
354 343 the string is odd.
355 344
356 345 Returns
357 346 -------
358 347 If there is an open quote, the quote character is returned. Else, return
359 348 False.
360 349 """
361 350 # We check " first, then ', so complex cases with nested quotes will get
362 351 # the " to take precedence.
363 352 if s.count('"') % 2:
364 353 return '"'
365 354 elif s.count("'") % 2:
366 355 return "'"
367 356 else:
368 357 return False
369 358
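A quick illustration of the return values of ``has_open_quotes`` (worked out by hand)::

    has_open_quotes('print("hello')   # -> '"'
    has_open_quotes("print('done')")  # -> False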
370 359
371 360 def protect_filename(s, protectables=PROTECTABLES):
372 361 """Escape a string to protect certain characters."""
373 362 if set(s) & set(protectables):
374 363 if sys.platform == "win32":
375 364 return '"' + s + '"'
376 365 else:
377 366 return "".join(("\\" + c if c in protectables else c) for c in s)
378 367 else:
379 368 return s
380 369
381 370
382 371 def expand_user(path:str) -> Tuple[str, bool, str]:
383 372 """Expand ``~``-style usernames in strings.
384 373
385 374 This is similar to :func:`os.path.expanduser`, but it computes and returns
386 375 extra information that will be useful if the input was being used in
387 376 computing completions, and you wish to return the completions with the
388 377 original '~' instead of its expanded value.
389 378
390 379 Parameters
391 380 ----------
392 381 path : str
393 382 String to be expanded. If no ~ is present, the output is the same as the
394 383 input.
395 384
396 385 Returns
397 386 -------
398 387 newpath : str
399 388 Result of ~ expansion in the input path.
400 389 tilde_expand : bool
401 390 Whether any expansion was performed or not.
402 391 tilde_val : str
403 392 The value that ~ was replaced with.
404 393 """
405 394 # Default values
406 395 tilde_expand = False
407 396 tilde_val = ''
408 397 newpath = path
409 398
410 399 if path.startswith('~'):
411 400 tilde_expand = True
412 401 rest = len(path)-1
413 402 newpath = os.path.expanduser(path)
414 403 if rest:
415 404 tilde_val = newpath[:-rest]
416 405 else:
417 406 tilde_val = newpath
418 407
419 408 return newpath, tilde_expand, tilde_val
420 409
421 410
422 411 def compress_user(path:str, tilde_expand:bool, tilde_val:str) -> str:
423 412 """Does the opposite of expand_user, with its outputs.
424 413 """
425 414 if tilde_expand:
426 415 return path.replace(tilde_val, '~')
427 416 else:
428 417 return path
429 418
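A small round-trip sketch of the two helpers above (the expanded value depends on the local home directory)::

    newpath, expanded, tilde_val = expand_user("~/notebooks")
    # compress_user undoes the expansion so completions can be shown with '~' again:
    label = compress_user(newpath, expanded, tilde_val)  # "~/notebooks"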
430 419
431 420 def completions_sorting_key(word):
432 421 """key for sorting completions
433 422
434 423 This does several things:
435 424
436 425 - Demote any completions starting with underscores to the end
437 426 - Insert any %magic and %%cellmagic completions in the alphabetical order
438 427 by their name
439 428 """
440 429 prio1, prio2 = 0, 0
441 430
442 431 if word.startswith('__'):
443 432 prio1 = 2
444 433 elif word.startswith('_'):
445 434 prio1 = 1
446 435
447 436 if word.endswith('='):
448 437 prio1 = -1
449 438
450 439 if word.startswith('%%'):
451 440 # If there's another % in there, this is something else, so leave it alone
452 441 if not "%" in word[2:]:
453 442 word = word[2:]
454 443 prio2 = 2
455 444 elif word.startswith('%'):
456 445 if not "%" in word[1:]:
457 446 word = word[1:]
458 447 prio2 = 1
459 448
460 449 return prio1, word, prio2
461 450
462 451
463 452 class _FakeJediCompletion:
464 453 """
465 454 This is a workaround to communicate to the UI that Jedi has crashed and to
466 455 report a bug. Will be used only if :any:`IPCompleter.debug` is set to true.
467 456
468 457 Added in IPython 6.0 so should likely be removed for 7.0
469 458
470 459 """
471 460
472 461 def __init__(self, name):
473 462
474 463 self.name = name
475 464 self.complete = name
476 465 self.type = 'crashed'
477 466 self.name_with_symbols = name
478 467 self.signature = ""
479 468 self._origin = "fake"
480 469 self.text = "crashed"
481 470
482 471 def __repr__(self):
483 472 return '<Fake completion object jedi has crashed>'
484 473
485 474
486 475 _JediCompletionLike = Union["jedi.api.Completion", _FakeJediCompletion]
487 476
488 477
489 478 class Completion:
490 479 """
491 480 Completion object used and returned by IPython completers.
492 481
493 482 .. warning::
494 483
495 484 Unstable
496 485
497 486 This function is unstable, API may change without warning.
498 487 It will also raise unless used in the proper context manager.
499 488
500 489 This acts as a middle-ground :any:`Completion` object between the
501 490 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
502 491 object. While Jedi needs a lot of information about the evaluator and how the
503 492 code should be run/inspected, PromptToolkit (and other frontends) mostly
504 493 need user-facing information.
505 494
506 495 - Which range should be replaced by what.
507 496 - Some metadata (like the completion type), or meta information to be displayed to
508 497 the user.
509 498
510 499 For debugging purposes we can also store the origin of the completion (``jedi``,
511 500 ``IPython.python_matches``, ``IPython.magics_matches``...).
512 501 """
513 502
514 503 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
515 504
516 505 def __init__(
517 506 self,
518 507 start: int,
519 508 end: int,
520 509 text: str,
521 510 *,
522 511 type: Optional[str] = None,
523 512 _origin="",
524 513 signature="",
525 514 ) -> None:
526 515 warnings.warn(
527 516 "``Completion`` is a provisional API (as of IPython 6.0). "
528 517 "It may change without warnings. "
529 518 "Use in corresponding context manager.",
530 519 category=ProvisionalCompleterWarning,
531 520 stacklevel=2,
532 521 )
533 522
534 523 self.start = start
535 524 self.end = end
536 525 self.text = text
537 526 self.type = type
538 527 self.signature = signature
539 528 self._origin = _origin
540 529
541 530 def __repr__(self):
542 531 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
543 532 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
544 533
545 534 def __eq__(self, other) -> bool:
546 535 """
547 536 Equality and hash do not take the type into account (as some completers may
548 537 not be able to infer the type), but are used to (partially) de-duplicate
549 538 completions.
550 539
551 540 Completely de-duplicating completions is a bit trickier than just
552 541 comparing, as it depends on the surrounding text, which Completions are not
553 542 aware of.
554 543 """
555 544 return self.start == other.start and \
556 545 self.end == other.end and \
557 546 self.text == other.text
558 547
559 548 def __hash__(self):
560 549 return hash((self.start, self.end, self.text))
561 550
562 551
563 552 class SimpleCompletion:
564 553 """Completion item to be included in the dictionary returned by new-style Matcher (API v2).
565 554
566 555 .. warning::
567 556
568 557 Provisional
569 558
570 559 This class is used to describe the currently supported attributes of
571 560 simple completion items, and any additional implementation details
572 561 should not be relied on. Additional attributes may be included in
573 562 future versions, and the meaning of text may be disambiguated from the current
574 563 dual meaning of "text to insert" and "text to use as a label".
575 564 """
576 565
577 566 __slots__ = ["text", "type"]
578 567
579 568 def __init__(self, text: str, *, type: Optional[str] = None):
580 569 self.text = text
581 570 self.type = type
582 571
583 572 def __repr__(self):
584 573 return f"<SimpleCompletion text={self.text!r} type={self.type!r}>"
585 574
586 575
587 576 class _MatcherResultBase(TypedDict):
588 577 """Definition of dictionary to be returned by new-style Matcher (API v2)."""
589 578
590 579 #: Suffix of the provided ``CompletionContext.token``, if not given defaults to full token.
591 580 matched_fragment: NotRequired[str]
592 581
593 582 #: Whether to suppress results from all other matchers (True), some
594 583 #: matchers (set of identifiers) or none (False); default is False.
595 584 suppress: NotRequired[Union[bool, Set[str]]]
596 585
597 586 #: Identifiers of matchers which should NOT be suppressed when this matcher
598 587 #: requests to suppress all other matchers; defaults to an empty set.
599 588 do_not_suppress: NotRequired[Set[str]]
600 589
601 590 #: Are completions already ordered and should be left as-is? default is False.
602 591 ordered: NotRequired[bool]
603 592
604 593
605 594 @sphinx_options(show_inherited_members=True, exclude_inherited_from=["dict"])
606 595 class SimpleMatcherResult(_MatcherResultBase, TypedDict):
607 596 """Result of new-style completion matcher."""
608 597
609 598 # note: TypedDict is added again to the inheritance chain
610 599 # in order to get __orig_bases__ for documentation
611 600
612 601 #: List of candidate completions
613 602 completions: Sequence[SimpleCompletion] | Iterator[SimpleCompletion]
614 603
615 604
616 605 class _JediMatcherResult(_MatcherResultBase):
617 606 """Matching result returned by Jedi (will be processed differently)"""
618 607
619 608 #: list of candidate completions
620 609 completions: Iterator[_JediCompletionLike]
621 610
622 611
623 612 AnyMatcherCompletion = Union[_JediCompletionLike, SimpleCompletion]
624 613 AnyCompletion = TypeVar("AnyCompletion", AnyMatcherCompletion, Completion)
625 614
626 615
627 616 @dataclass
628 617 class CompletionContext:
629 618 """Completion context provided as an argument to matchers in the Matcher API v2."""
630 619
631 620 # rationale: many legacy matchers relied on completer state (`self.text_until_cursor`)
632 621 # which was not explicitly visible as an argument of the matcher, making any refactor
633 622 # prone to errors; by explicitly passing `cursor_position` we can decouple the matchers
634 623 # from the completer, and make substituting them in sub-classes easier.
635 624
636 625 #: Relevant fragment of code directly preceding the cursor.
637 626 #: The extraction of token is implemented via splitter heuristic
638 627 #: (following readline behaviour for legacy reasons), which is user configurable
639 628 #: (by switching the greedy mode).
640 629 token: str
641 630
642 631 #: The full available content of the editor or buffer
643 632 full_text: str
644 633
645 634 #: Cursor position in the line (the same for ``full_text`` and ``text``).
646 635 cursor_position: int
647 636
648 637 #: Cursor line in ``full_text``.
649 638 cursor_line: int
650 639
651 640 #: The maximum number of completions that will be used downstream.
652 641 #: Matchers can use this information to abort early.
653 642 #: The built-in Jedi matcher is currently excepted from this limit.
654 643 # If not given, return all possible completions.
655 644 limit: Optional[int]
656 645
657 646 @cached_property
658 647 def text_until_cursor(self) -> str:
659 648 return self.line_with_cursor[: self.cursor_position]
660 649
661 650 @cached_property
662 651 def line_with_cursor(self) -> str:
663 652 return self.full_text.split("\n")[self.cursor_line]
664 653
665 654
666 655 #: Matcher results for API v2.
667 656 MatcherResult = Union[SimpleMatcherResult, _JediMatcherResult]
668 657
669 658
670 659 class _MatcherAPIv1Base(Protocol):
671 660 def __call__(self, text: str) -> List[str]:
672 661 """Call signature."""
673 662 ...
674 663
675 664 #: Used to construct the default matcher identifier
676 665 __qualname__: str
677 666
678 667
679 668 class _MatcherAPIv1Total(_MatcherAPIv1Base, Protocol):
680 669 #: API version
681 670 matcher_api_version: Optional[Literal[1]]
682 671
683 672 def __call__(self, text: str) -> List[str]:
684 673 """Call signature."""
685 674 ...
686 675
687 676
688 677 #: Protocol describing Matcher API v1.
689 678 MatcherAPIv1: TypeAlias = Union[_MatcherAPIv1Base, _MatcherAPIv1Total]
690 679
691 680
692 681 class MatcherAPIv2(Protocol):
693 682 """Protocol describing Matcher API v2."""
694 683
695 684 #: API version
696 685 matcher_api_version: Literal[2] = 2
697 686
698 687 def __call__(self, context: CompletionContext) -> MatcherResult:
699 688 """Call signature."""
700 689 ...
701 690
702 691 #: Used to construct the default matcher identifier
703 692 __qualname__: str
704 693
705 694
706 695 Matcher: TypeAlias = Union[MatcherAPIv1, MatcherAPIv2]
707 696
708 697
709 698 def _is_matcher_v1(matcher: Matcher) -> TypeGuard[MatcherAPIv1]:
710 699 api_version = _get_matcher_api_version(matcher)
711 700 return api_version == 1
712 701
713 702
714 703 def _is_matcher_v2(matcher: Matcher) -> TypeGuard[MatcherAPIv2]:
715 704 api_version = _get_matcher_api_version(matcher)
716 705 return api_version == 2
717 706
718 707
719 708 def _is_sizable(value: Any) -> TypeGuard[Sized]:
720 709 """Determines whether objects is sizable"""
721 710 return hasattr(value, "__len__")
722 711
723 712
724 713 def _is_iterator(value: Any) -> TypeGuard[Iterator]:
725 714 """Determines whether objects is sizable"""
726 715 return hasattr(value, "__next__")
727 716
728 717
729 718 def has_any_completions(result: MatcherResult) -> bool:
730 719 """Check if any result includes any completions."""
731 720 completions = result["completions"]
732 721 if _is_sizable(completions):
733 722 return len(completions) != 0
734 723 if _is_iterator(completions):
735 724 try:
736 725 old_iterator = completions
737 726 first = next(old_iterator)
738 727 result["completions"] = cast(
739 728 Iterator[SimpleCompletion],
740 729 itertools.chain([first], old_iterator),
741 730 )
742 731 return True
743 732 except StopIteration:
744 733 return False
745 734 raise ValueError(
746 735 "Completions returned by matcher need to be an Iterator or a Sizable"
747 736 )
748 737
749 738
750 739 def completion_matcher(
751 740 *,
752 741 priority: Optional[float] = None,
753 742 identifier: Optional[str] = None,
754 743 api_version: int = 1,
755 744 ):
756 745 """Adds attributes describing the matcher.
757 746
758 747 Parameters
759 748 ----------
760 749 priority : Optional[float]
761 750 The priority of the matcher, determines the order of execution of matchers.
762 751 Higher priority means that the matcher will be executed first. Defaults to 0.
763 752 identifier : Optional[str]
764 753 identifier of the matcher allowing users to modify the behaviour via traitlets,
765 754 and also used for debugging (will be passed as ``origin`` with the completions).
766 755
767 756 Defaults to matcher function's ``__qualname__`` (for example,
768 757 ``IPCompleter.file_matcher`` for the built-in matcher defined
769 758 as a ``file_matcher`` method of the ``IPCompleter`` class).
770 759 api_version: Optional[int]
771 760 version of the Matcher API used by this matcher.
772 761 Currently supported values are 1 and 2.
773 762 Defaults to 1.
774 763 """
775 764
776 765 def wrapper(func: Matcher):
777 766 func.matcher_priority = priority or 0 # type: ignore
778 767 func.matcher_identifier = identifier or func.__qualname__ # type: ignore
779 768 func.matcher_api_version = api_version # type: ignore
780 769 if TYPE_CHECKING:
781 770 if api_version == 1:
782 771 func = cast(MatcherAPIv1, func)
783 772 elif api_version == 2:
784 773 func = cast(MatcherAPIv2, func)
785 774 return func
786 775
787 776 return wrapper
788 777
789 778
790 779 def _get_matcher_priority(matcher: Matcher):
791 780 return getattr(matcher, "matcher_priority", 0)
792 781
793 782
794 783 def _get_matcher_id(matcher: Matcher):
795 784 return getattr(matcher, "matcher_identifier", matcher.__qualname__)
796 785
797 786
798 787 def _get_matcher_api_version(matcher):
799 788 return getattr(matcher, "matcher_api_version", 1)
800 789
801 790
802 791 context_matcher = partial(completion_matcher, api_version=2)
803 792
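A minimal sketch of registering a custom matcher with the decorator above (the function name and word list are invented; API v1 simply maps the current text to a list of strings)::

    @completion_matcher(priority=0.5, api_version=1)
    def color_matcher(text):
        words = ["red", "green", "blue"]
        return [w for w in words if w.startswith(text)]

    # As documented in the module docstring, append it to a live completer:
    # get_ipython().Completer.custom_matchers.append(color_matcher)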
804 793
805 794 _IC = Iterable[Completion]
806 795
807 796
808 797 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
809 798 """
810 799 Deduplicate a set of completions.
811 800
812 801 .. warning::
813 802
814 803 Unstable
815 804
816 805 This function is unstable, API may change without warning.
817 806
818 807 Parameters
819 808 ----------
820 809 text : str
821 810 text that should be completed.
822 811 completions : Iterator[Completion]
823 812 iterator over the completions to deduplicate
824 813
825 814 Yields
826 815 ------
827 816 `Completions` objects
828 817 Completions coming from multiple sources may be different but end up having
829 818 the same effect when applied to ``text``. If this is the case, this will
830 819 consider completions as equal and only emit the first encountered.
831 820 Not folded into `completions()` yet for debugging purposes, and to detect when
832 821 the IPython completer does return things that Jedi does not, but should be
833 822 at some point.
834 823 """
835 824 completions = list(completions)
836 825 if not completions:
837 826 return
838 827
839 828 new_start = min(c.start for c in completions)
840 829 new_end = max(c.end for c in completions)
841 830
842 831 seen = set()
843 832 for c in completions:
844 833 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
845 834 if new_text not in seen:
846 835 yield c
847 836 seen.add(new_text)
848 837
849 838
850 839 def rectify_completions(text: str, completions: _IC, *, _debug: bool = False) -> _IC:
851 840 """
852 841 Rectify a set of completions to all have the same ``start`` and ``end``
853 842
854 843 .. warning::
855 844
856 845 Unstable
857 846
858 847 This function is unstable, API may change without warning.
859 848 It will also raise unless use in proper context manager.
860 849
861 850 Parameters
862 851 ----------
863 852 text : str
864 853 text that should be completed.
865 854 completions : Iterator[Completion]
866 855 iterator over the completions to rectify
867 856 _debug : bool
868 857 Log failed completion
869 858
870 859 Notes
871 860 -----
872 861 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
873 862 the Jupyter Protocol requires them to behave like so. This will readjust
874 863 the completion to have the same ``start`` and ``end`` by padding both
875 864 extremities with surrounding text.
876 865
877 866 During stabilisation this should support a ``_debug`` option to log which
878 867 completions are returned by the IPython completer and not found in Jedi, in
879 868 order to make upstream bug reports.
880 869 """
881 870 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
882 871 "It may change without warnings. "
883 872 "Use in corresponding context manager.",
884 873 category=ProvisionalCompleterWarning, stacklevel=2)
885 874
886 875 completions = list(completions)
887 876 if not completions:
888 877 return
889 878 starts = (c.start for c in completions)
890 879 ends = (c.end for c in completions)
891 880
892 881 new_start = min(starts)
893 882 new_end = max(ends)
894 883
895 884 seen_jedi = set()
896 885 seen_python_matches = set()
897 886 for c in completions:
898 887 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
899 888 if c._origin == 'jedi':
900 889 seen_jedi.add(new_text)
901 890 elif c._origin == "IPCompleter.python_matcher":
902 891 seen_python_matches.add(new_text)
903 892 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
904 893 diff = seen_python_matches.difference(seen_jedi)
905 894 if diff and _debug:
906 895 print('IPython.python matches have extras:', diff)
907 896
908 897
909 898 if sys.platform == 'win32':
910 899 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
911 900 else:
912 901 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
913 902
914 903 GREEDY_DELIMS = ' =\r\n'
915 904
916 905
917 906 class CompletionSplitter(object):
918 907 """An object to split an input line in a manner similar to readline.
919 908
920 909 By having our own implementation, we can expose readline-like completion in
921 910 a uniform manner to all frontends. This object only needs to be given the
922 911 line of text to be split and the cursor position on said line, and it
923 912 returns the 'word' to be completed on at the cursor after splitting the
924 913 entire line.
925 914
926 915 What characters are used as splitting delimiters can be controlled by
927 916 setting the ``delims`` attribute (this is a property that internally
928 917 automatically builds the necessary regular expression)"""
929 918
930 919 # Private interface
931 920
932 921 # A string of delimiter characters. The default value makes sense for
933 922 # IPython's most typical usage patterns.
934 923 _delims = DELIMS
935 924
936 925 # The expression (a normal string) to be compiled into a regular expression
937 926 # for actual splitting. We store it as an attribute mostly for ease of
938 927 # debugging, since this type of code can be so tricky to debug.
939 928 _delim_expr = None
940 929
941 930 # The regular expression that does the actual splitting
942 931 _delim_re = None
943 932
944 933 def __init__(self, delims=None):
945 934 delims = CompletionSplitter._delims if delims is None else delims
946 935 self.delims = delims
947 936
948 937 @property
949 938 def delims(self):
950 939 """Return the string of delimiter characters."""
951 940 return self._delims
952 941
953 942 @delims.setter
954 943 def delims(self, delims):
955 944 """Set the delimiters for line splitting."""
956 945 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
957 946 self._delim_re = re.compile(expr)
958 947 self._delims = delims
959 948 self._delim_expr = expr
960 949
961 950 def split_line(self, line, cursor_pos=None):
962 951 """Split a line of text with a cursor at the given position.
963 952 """
964 953 l = line if cursor_pos is None else line[:cursor_pos]
965 954 return self._delim_re.split(l)[-1]
966 955
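A brief sketch of the readline-like splitting performed by this class (the example lines are invented)::

    splitter = CompletionSplitter()
    splitter.split_line("print(os.pa")      # -> 'os.pa', the token to complete
    splitter.split_line("a = {'key': val")  # -> 'val'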
967 956
968 957
969 958 class Completer(Configurable):
970 959
971 960 greedy = Bool(
972 961 False,
973 962 help="""Activate greedy completion.
974 963
975 964 .. deprecated:: 8.8
976 965 Use :std:configtrait:`Completer.evaluation` and :std:configtrait:`Completer.auto_close_dict_keys` instead.
977 966
978 967 When enabled in IPython 8.8 or newer, changes configuration as follows:
979 968
980 969 - ``Completer.evaluation = 'unsafe'``
981 970 - ``Completer.auto_close_dict_keys = True``
982 971 """,
983 972 ).tag(config=True)
984 973
985 974 evaluation = Enum(
986 975 ("forbidden", "minimal", "limited", "unsafe", "dangerous"),
987 976 default_value="limited",
988 977 help="""Policy for code evaluation under completion.
989 978
990 979 Successive options allow to enable more eager evaluation for better
991 980 completion suggestions, including for nested dictionaries, nested lists,
992 981 or even results of function calls.
993 982 Setting ``unsafe`` or higher can lead to evaluation of arbitrary user
994 983 code on :kbd:`Tab` with potentially unwanted or dangerous side effects.
995 984
996 985 Allowed values are:
997 986
998 987 - ``forbidden``: no evaluation of code is permitted,
999 988 - ``minimal``: evaluation of literals and access to built-in namespace;
1000 989 no item/attribute evaluation, no access to locals/globals,
1001 990 no evaluation of any operations or comparisons.
1002 991 - ``limited``: access to all namespaces, evaluation of hard-coded methods
1003 992 (for example: :any:`dict.keys`, :any:`object.__getattr__`,
1004 993 :any:`object.__getitem__`) on allow-listed objects (for example:
1005 994 :any:`dict`, :any:`list`, :any:`tuple`, ``pandas.Series``),
1006 995 - ``unsafe``: evaluation of all methods and function calls but not of
1007 996 syntax with side-effects like `del x`,
1008 997 - ``dangerous``: completely arbitrary evaluation.
1009 998 """,
1010 999 ).tag(config=True)
1011 1000
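    # A hedged configuration sketch (assumption: values set via the standard traitlets
    # config file, e.g. ipython_config.py, or interactively with %config):
    #
    #     c.Completer.evaluation = "minimal"
    #     c.Completer.auto_close_dict_keys = True
    #
    # "minimal" restricts completion-time evaluation to literals and builtins,
    # per the help text above.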
1012 1001 use_jedi = Bool(default_value=JEDI_INSTALLED,
1013 1002 help="Experimental: Use Jedi to generate autocompletions. "
1014 1003 "Default to True if jedi is installed.").tag(config=True)
1015 1004
1016 1005 jedi_compute_type_timeout = Int(default_value=400,
1017 1006 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
1018 1007 Set to 0 to stop computing types. A non-zero value lower than 100ms may hurt
1019 1008 performance by preventing jedi from building its cache.
1020 1009 """).tag(config=True)
1021 1010
1022 1011 debug = Bool(default_value=False,
1023 1012 help='Enable debug for the Completer. Mostly print extra '
1024 1013 'information for experimental jedi integration.')\
1025 1014 .tag(config=True)
1026 1015
1027 1016 backslash_combining_completions = Bool(True,
1028 1017 help="Enable unicode completions, e.g. \\alpha<tab> . "
1029 1018 "Includes completion of latex commands, unicode names, and expanding "
1030 1019 "unicode characters back to latex commands.").tag(config=True)
1031 1020
1032 1021 auto_close_dict_keys = Bool(
1033 1022 False,
1034 1023 help="""
1035 1024 Enable auto-closing dictionary keys.
1036 1025
1037 1026 When enabled string keys will be suffixed with a final quote
1038 1027 (matching the opening quote), tuple keys will also receive a
1039 1028 separating comma if needed, and keys which are final will
1040 1029 receive a closing bracket (``]``).
1041 1030 """,
1042 1031 ).tag(config=True)
1043 1032
1044 1033 def __init__(self, namespace=None, global_namespace=None, **kwargs):
1045 1034 """Create a new completer for the command line.
1046 1035
1047 1036 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
1048 1037
1049 1038 If unspecified, the default namespace where completions are performed
1050 1039 is __main__ (technically, __main__.__dict__). Namespaces should be
1051 1040 given as dictionaries.
1052 1041
1053 1042 An optional second namespace can be given. This allows the completer
1054 1043 to handle cases where both the local and global scopes need to be
1055 1044 distinguished.
1056 1045 """
1057 1046
1058 1047 # Don't bind to namespace quite yet, but flag whether the user wants a
1059 1048 # specific namespace or to use __main__.__dict__. This will allow us
1060 1049 # to bind to __main__.__dict__ at completion time, not now.
1061 1050 if namespace is None:
1062 1051 self.use_main_ns = True
1063 1052 else:
1064 1053 self.use_main_ns = False
1065 1054 self.namespace = namespace
1066 1055
1067 1056 # The global namespace, if given, can be bound directly
1068 1057 if global_namespace is None:
1069 1058 self.global_namespace = {}
1070 1059 else:
1071 1060 self.global_namespace = global_namespace
1072 1061
1073 1062 self.custom_matchers = []
1074 1063
1075 1064 super(Completer, self).__init__(**kwargs)
1076 1065
1077 1066 def complete(self, text, state):
1078 1067 """Return the next possible completion for 'text'.
1079 1068
1080 1069 This is called successively with state == 0, 1, 2, ... until it
1081 1070 returns None. The completion should begin with 'text'.
1082 1071
1083 1072 """
1084 1073 if self.use_main_ns:
1085 1074 self.namespace = __main__.__dict__
1086 1075
1087 1076 if state == 0:
1088 1077 if "." in text:
1089 1078 self.matches = self.attr_matches(text)
1090 1079 else:
1091 1080 self.matches = self.global_matches(text)
1092 1081 try:
1093 1082 return self.matches[state]
1094 1083 except IndexError:
1095 1084 return None
1096 1085
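A hedged sketch of the readline-style protocol described above (the namespace contents are invented)::

    c = Completer(namespace={"alpha": 1, "alphabet": "xyz"})
    state, matches = 0, []
    while (m := c.complete("alp", state)) is not None:
        matches.append(m)
        state += 1
    # matches now contains 'alpha' and 'alphabet'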
1097 1086 def global_matches(self, text):
1098 1087 """Compute matches when text is a simple name.
1099 1088
1100 1089 Return a list of all keywords, built-in functions and names currently
1101 1090 defined in self.namespace or self.global_namespace that match.
1102 1091
1103 1092 """
1104 1093 matches = []
1105 1094 match_append = matches.append
1106 1095 n = len(text)
1107 1096 for lst in [
1108 1097 keyword.kwlist,
1109 1098 builtin_mod.__dict__.keys(),
1110 1099 list(self.namespace.keys()),
1111 1100 list(self.global_namespace.keys()),
1112 1101 ]:
1113 1102 for word in lst:
1114 1103 if word[:n] == text and word != "__builtins__":
1115 1104 match_append(word)
1116 1105
1117 1106 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
1118 1107 for lst in [list(self.namespace.keys()), list(self.global_namespace.keys())]:
1119 1108 shortened = {
1120 1109 "_".join([sub[0] for sub in word.split("_")]): word
1121 1110 for word in lst
1122 1111 if snake_case_re.match(word)
1123 1112 }
1124 1113 for word in shortened.keys():
1125 1114 if word[:n] == text and word != "__builtins__":
1126 1115 match_append(shortened[word])
1127 1116 return matches
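
    # A usage note, with hypothetical names: besides plain prefix matching,
    # ``global_matches`` also offers snake_case abbreviation completion. With
    # ``foo_bar_baz`` defined in the namespace, typing ``f_b`` (or ``f_b_b``)
    # suggests ``foo_bar_baz``, because each snake_case name is shortened to
    # the initials of its underscore-separated parts ("f_b_b") before the
    # prefix comparison.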
1128 1117
1129 1118 def attr_matches(self, text):
1130 1119 """Compute matches when text contains a dot.
1131 1120
1132 1121 Assuming the text is of the form NAME.NAME....[NAME], and is
1133 1122 evaluatable in self.namespace or self.global_namespace, it will be
1134 1123 evaluated and its attributes (as revealed by dir()) are used as
1135 1124 possible completions. (For class instances, class members are
1136 1125 also considered.)
1137 1126
1138 1127 WARNING: this can still invoke arbitrary C code, if an object
1139 1128 with a __getattr__ hook is evaluated.
1140 1129
1141 1130 """
1142 1131 return self._attr_matches(text)[0]
1143 1132
1144 1133 def _attr_matches(self, text, include_prefix=True) -> Tuple[Sequence[str], str]:
1145 1134 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
1146 1135 if not m2:
1147 1136 return [], ""
1148 1137 expr, attr = m2.group(1, 2)
1149 1138
1150 1139 obj = self._evaluate_expr(expr)
1151 1140
1152 1141 if obj is not_found:
1153 1142 return [], ""
1154 1143
1155 1144 if self.limit_to__all__ and hasattr(obj, '__all__'):
1156 1145 words = get__all__entries(obj)
1157 1146 else:
1158 1147 words = dir2(obj)
1159 1148
1160 1149 try:
1161 1150 words = generics.complete_object(obj, words)
1162 1151 except TryNext:
1163 1152 pass
1164 1153 except AssertionError:
1165 1154 raise
1166 1155 except Exception:
1167 1156 # Silence errors from completion function
1168 1157 pass
1169 1158 # Build match list to return
1170 1159 n = len(attr)
1171 1160
1172 1161 # Note: ideally we would just return words here and the prefix
1173 1162 # reconciliator would know that we intend to append to rather than
1174 1163 # replace the input text; this requires refactoring to return range
1175 1164 # which ought to be replaced (as does jedi).
1176 1165 if include_prefix:
1177 1166 tokens = _parse_tokens(expr)
1178 1167 rev_tokens = reversed(tokens)
1179 1168 skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
1180 1169 name_turn = True
1181 1170
1182 1171 parts = []
1183 1172 for token in rev_tokens:
1184 1173 if token.type in skip_over:
1185 1174 continue
1186 1175 if token.type == tokenize.NAME and name_turn:
1187 1176 parts.append(token.string)
1188 1177 name_turn = False
1189 1178 elif (
1190 1179 token.type == tokenize.OP and token.string == "." and not name_turn
1191 1180 ):
1192 1181 parts.append(token.string)
1193 1182 name_turn = True
1194 1183 else:
1195 1184 # short-circuit if not empty nor name token
1196 1185 break
1197 1186
1198 1187 prefix_after_space = "".join(reversed(parts))
1199 1188 else:
1200 1189 prefix_after_space = ""
1201 1190
1202 1191 return (
1203 1192 ["%s.%s" % (prefix_after_space, w) for w in words if w[:n] == attr],
1204 1193 "." + attr,
1205 1194 )
1206 1195
1207 1196 def _evaluate_expr(self, expr):
1208 1197 obj = not_found
1209 1198 done = False
1210 1199 while not done and expr:
1211 1200 try:
1212 1201 obj = guarded_eval(
1213 1202 expr,
1214 1203 EvaluationContext(
1215 1204 globals=self.global_namespace,
1216 1205 locals=self.namespace,
1217 1206 evaluation=self.evaluation,
1218 1207 ),
1219 1208 )
1220 1209 done = True
1221 1210 except Exception as e:
1222 1211 if self.debug:
1223 1212 print("Evaluation exception", e)
1224 1213 # trim the expression to remove any invalid prefix
1225 1214 # e.g. user starts `(d[`, so we get `expr = '(d'`,
1226 1215 # where parenthesis is not closed.
1227 1216 # TODO: make this faster by reusing parts of the computation?
1228 1217 expr = expr[1:]
1229 1218 return obj
1230 1219
1231 1220 def get__all__entries(obj):
1232 1221 """returns the strings in the __all__ attribute"""
1233 1222 try:
1234 1223 words = getattr(obj, '__all__')
1235 1224 except:
1236 1225 return []
1237 1226
1238 1227 return [w for w in words if isinstance(w, str)]
1239 1228
1240 1229
1241 1230 class _DictKeyState(enum.Flag):
1242 1231 """Represent state of the key match in context of other possible matches.
1243 1232
1244 1233 - given `d1 = {'a': 1}` completion on `d1['<tab>` will yield `{'a': END_OF_ITEM}` as there is no tuple.
1245 1234 - given `d2 = {('a', 'b'): 1}`: `d2['a', '<tab>` will yield `{'b': END_OF_TUPLE}` as there are no tuple members to add beyond `'b'`.
1246 1235 - given `d3 = {('a', 'b'): 1}`: `d3['<tab>` will yield `{'a': IN_TUPLE}` as `'a'` can be added.
1247 1236 - given `d4 = {'a': 1, ('a', 'b'): 2}`: `d4['<tab>` will yield `{'a': END_OF_ITEM | END_OF_TUPLE}`
1248 1237 """
1249 1238
1250 1239 BASELINE = 0
1251 1240 END_OF_ITEM = enum.auto()
1252 1241 END_OF_TUPLE = enum.auto()
1253 1242 IN_TUPLE = enum.auto()
1254 1243
1255 1244
1256 1245 def _parse_tokens(c):
1257 1246 """Parse tokens even if there is an error."""
1258 1247 tokens = []
1259 1248 token_generator = tokenize.generate_tokens(iter(c.splitlines()).__next__)
1260 1249 while True:
1261 1250 try:
1262 1251 tokens.append(next(token_generator))
1263 1252 except tokenize.TokenError:
1264 1253 return tokens
1265 1254 except StopIteration:
1266 1255 return tokens
1267 1256
1268 1257
1269 1258 def _match_number_in_dict_key_prefix(prefix: str) -> Union[str, None]:
1270 1259 """Match any valid Python numeric literal in a prefix of dictionary keys.
1271 1260
1272 1261 References:
1273 1262 - https://docs.python.org/3/reference/lexical_analysis.html#numeric-literals
1274 1263 - https://docs.python.org/3/library/tokenize.html
1275 1264 """
1276 1265 if prefix[-1].isspace():
1277 1266 # if user typed a space we do not have anything to complete
1278 1267 # even if there was a valid number token before
1279 1268 return None
1280 1269 tokens = _parse_tokens(prefix)
1281 1270 rev_tokens = reversed(tokens)
1282 1271 skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
1283 1272 number = None
1284 1273 for token in rev_tokens:
1285 1274 if token.type in skip_over:
1286 1275 continue
1287 1276 if number is None:
1288 1277 if token.type == tokenize.NUMBER:
1289 1278 number = token.string
1290 1279 continue
1291 1280 else:
1292 1281 # we did not match a number
1293 1282 return None
1294 1283 if token.type == tokenize.OP:
1295 1284 if token.string == ",":
1296 1285 break
1297 1286 if token.string in {"+", "-"}:
1298 1287 number = token.string + number
1299 1288 else:
1300 1289 return None
1301 1290 return number
1302 1291
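# Illustrative behaviour sketch for the helper above (assuming the prefix is
# only the unfinished key text, as passed from ``match_dict_keys``):
#     _match_number_in_dict_key_prefix("12")    -> "12"
#     _match_number_in_dict_key_prefix("-0x1")  -> "-0x1"
#     _match_number_in_dict_key_prefix("abc")   -> None  (not a number)
#     _match_number_in_dict_key_prefix("12 ")   -> None  (trailing space)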
1303 1292
1304 1293 _INT_FORMATS = {
1305 1294 "0b": bin,
1306 1295 "0o": oct,
1307 1296 "0x": hex,
1308 1297 }
1309 1298
1310 1299
1311 1300 def match_dict_keys(
1312 1301 keys: List[Union[str, bytes, Tuple[Union[str, bytes], ...]]],
1313 1302 prefix: str,
1314 1303 delims: str,
1315 1304 extra_prefix: Optional[Tuple[Union[str, bytes], ...]] = None,
1316 1305 ) -> Tuple[str, int, Dict[str, _DictKeyState]]:
1317 1306 """Used by dict_key_matches, matching the prefix to a list of keys
1318 1307
1319 1308 Parameters
1320 1309 ----------
1321 1310 keys
1322 1311 list of keys in dictionary currently being completed.
1323 1312 prefix
1324 1313 Part of the text already typed by the user. E.g. `mydict[b'fo`
1325 1314 delims
1326 1315 String of delimiters to consider when finding the current key.
1327 1316 extra_prefix : optional
1328 1317 Part of the text already typed in multi-key index cases. E.g. for
1329 1318 `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.
1330 1319
1331 1320 Returns
1332 1321 -------
1333 1322 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
1334 1323 ``quote`` being the quote that needs to be used to close the current string,
1335 1324 ``token_start`` the position where the replacement should start occurring, and
1336 1325 ``matched`` a dictionary mapping each completion key to a value indicating
1337 1326 its ``_DictKeyState``.
1338 1327 """
1339 1328 prefix_tuple = extra_prefix if extra_prefix else ()
1340 1329
1341 1330 prefix_tuple_size = sum(
1342 1331 [
1343 1332 # for pandas, do not count slices as taking space
1344 1333 not isinstance(k, slice)
1345 1334 for k in prefix_tuple
1346 1335 ]
1347 1336 )
1348 1337 text_serializable_types = (str, bytes, int, float, slice)
1349 1338
1350 1339 def filter_prefix_tuple(key):
1351 1340 # Reject too short keys
1352 1341 if len(key) <= prefix_tuple_size:
1353 1342 return False
1354 1343 # Reject keys which cannot be serialised to text
1355 1344 for k in key:
1356 1345 if not isinstance(k, text_serializable_types):
1357 1346 return False
1358 1347 # Reject keys that do not match the prefix
1359 1348 for k, pt in zip(key, prefix_tuple):
1360 1349 if k != pt and not isinstance(pt, slice):
1361 1350 return False
1362 1351 # All checks passed!
1363 1352 return True
1364 1353
1365 1354 filtered_key_is_final: Dict[Union[str, bytes, int, float], _DictKeyState] = (
1366 1355 defaultdict(lambda: _DictKeyState.BASELINE)
1367 1356 )
1368 1357
1369 1358 for k in keys:
1370 1359 # If at least one of the matches is not final, mark as undetermined.
1371 1360 # This can happen with `d = {111: 'b', (111, 222): 'a'}` where
1372 1361 # `111` appears final on first match but is not final on the second.
1373 1362
1374 1363 if isinstance(k, tuple):
1375 1364 if filter_prefix_tuple(k):
1376 1365 key_fragment = k[prefix_tuple_size]
1377 1366 filtered_key_is_final[key_fragment] |= (
1378 1367 _DictKeyState.END_OF_TUPLE
1379 1368 if len(k) == prefix_tuple_size + 1
1380 1369 else _DictKeyState.IN_TUPLE
1381 1370 )
1382 1371 elif prefix_tuple_size > 0:
1383 1372 # we are completing a tuple but this key is not a tuple,
1384 1373 # so we should ignore it
1385 1374 pass
1386 1375 else:
1387 1376 if isinstance(k, text_serializable_types):
1388 1377 filtered_key_is_final[k] |= _DictKeyState.END_OF_ITEM
1389 1378
1390 1379 filtered_keys = filtered_key_is_final.keys()
1391 1380
1392 1381 if not prefix:
1393 1382 return "", 0, {repr(k): v for k, v in filtered_key_is_final.items()}
1394 1383
1395 1384 quote_match = re.search("(?:\"|')", prefix)
1396 1385 is_user_prefix_numeric = False
1397 1386
1398 1387 if quote_match:
1399 1388 quote = quote_match.group()
1400 1389 valid_prefix = prefix + quote
1401 1390 try:
1402 1391 prefix_str = literal_eval(valid_prefix)
1403 1392 except Exception:
1404 1393 return "", 0, {}
1405 1394 else:
1406 1395 # If it does not look like a string, let's assume
1407 1396 # we are dealing with a number or variable.
1408 1397 number_match = _match_number_in_dict_key_prefix(prefix)
1409 1398
1410 1399 # We do not want the key matcher to suggest variable names so we yield:
1411 1400 if number_match is None:
1412 1401 # The alternative would be to assume that the user forgot the quote
1413 1402 # and if the substring matches, suggest adding it at the start.
1414 1403 return "", 0, {}
1415 1404
1416 1405 prefix_str = number_match
1417 1406 is_user_prefix_numeric = True
1418 1407 quote = ""
1419 1408
1420 1409 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
1421 1410 token_match = re.search(pattern, prefix, re.UNICODE)
1422 1411 assert token_match is not None # silence mypy
1423 1412 token_start = token_match.start()
1424 1413 token_prefix = token_match.group()
1425 1414
1426 1415 matched: Dict[str, _DictKeyState] = {}
1427 1416
1428 1417 str_key: Union[str, bytes]
1429 1418
1430 1419 for key in filtered_keys:
1431 1420 if isinstance(key, (int, float)):
1432 1421 # User typed a number but this key is not a number.
1433 1422 if not is_user_prefix_numeric:
1434 1423 continue
1435 1424 str_key = str(key)
1436 1425 if isinstance(key, int):
1437 1426 int_base = prefix_str[:2].lower()
1438 1427 # if user typed integer using binary/oct/hex notation:
1439 1428 if int_base in _INT_FORMATS:
1440 1429 int_format = _INT_FORMATS[int_base]
1441 1430 str_key = int_format(key)
1442 1431 else:
1443 1432 # User typed a string but this key is a number.
1444 1433 if is_user_prefix_numeric:
1445 1434 continue
1446 1435 str_key = key
1447 1436 try:
1448 1437 if not str_key.startswith(prefix_str):
1449 1438 continue
1450 1439 except (AttributeError, TypeError, UnicodeError) as e:
1451 1440 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
1452 1441 continue
1453 1442
1454 1443 # reformat remainder of key to begin with prefix
1455 1444 rem = str_key[len(prefix_str) :]
1456 1445 # force repr wrapped in '
1457 1446 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
1458 1447 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
1459 1448 if quote == '"':
1460 1449 # The entered prefix is quoted with ",
1461 1450 # but the match is quoted with '.
1462 1451 # A contained " hence needs escaping for comparison:
1463 1452 rem_repr = rem_repr.replace('"', '\\"')
1464 1453
1465 1454 # then reinsert prefix from start of token
1466 1455 match = "%s%s" % (token_prefix, rem_repr)
1467 1456
1468 1457 matched[match] = filtered_key_is_final[key]
1469 1458 return quote, token_start, matched
1470 1459
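# Illustrative sketch (hypothetical values, not a doctest): for a plain dict
# with keys ["foo", "foobar"] and a user who has typed ``d['f``, a call like
#     match_dict_keys(["foo", "foobar"], "'f", delims=" \t\n")
# would return roughly ("'", 0, {"'foo": ..., "'foobar": ...}): the quote that
# closes the string, the offset in the prefix where replacement starts, and
# the surviving completions mapped to their ``_DictKeyState``.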
1471 1460
1472 1461 def cursor_to_position(text:str, line:int, column:int)->int:
1473 1462 """
1474 1463 Convert the (line,column) position of the cursor in text to an offset in a
1475 1464 string.
1476 1465
1477 1466 Parameters
1478 1467 ----------
1479 1468 text : str
1480 1469 The text in which to calculate the cursor offset
1481 1470 line : int
1482 1471 Line of the cursor; 0-indexed
1483 1472 column : int
1484 1473 Column of the cursor; 0-indexed
1485 1474
1486 1475 Returns
1487 1476 -------
1488 1477 Position of the cursor in ``text``, 0-indexed.
1489 1478
1490 1479 See Also
1491 1480 --------
1492 1481 position_to_cursor : reciprocal of this function
1493 1482
1494 1483 """
1495 1484 lines = text.split('\n')
1496 1485 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
1497 1486
1498 1487 return sum(len(l) + 1 for l in lines[:line]) + column
1499 1488
1500 1489 def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
1501 1490 """
1502 1491 Convert the position of the cursor in text (0-indexed) to a line
1503 1492 number (0-indexed) and a column number (0-indexed) pair
1504 1493
1505 1494 Position should be a valid position in ``text``.
1506 1495
1507 1496 Parameters
1508 1497 ----------
1509 1498 text : str
1510 1499 The text in which to calculate the cursor offset
1511 1500 offset : int
1512 1501 Position of the cursor in ``text``, 0-indexed.
1513 1502
1514 1503 Returns
1515 1504 -------
1516 1505 (line, column) : (int, int)
1517 1506 Line of the cursor; 0-indexed, column of the cursor 0-indexed
1518 1507
1519 1508 See Also
1520 1509 --------
1521 1510 cursor_to_position : reciprocal of this function
1522 1511
1523 1512 """
1524 1513
1525 1514 assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
1526 1515
1527 1516 before = text[:offset]
1528 1517 blines = before.split('\n') # ! splitlines() would trim the trailing \n
1529 1518 line = before.count('\n')
1530 1519 col = len(blines[-1])
1531 1520 return line, col
1532 1521
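# Quick doctest-style sketch of the two helpers above, which are inverses of
# each other for valid positions:
#     >>> cursor_to_position("ab\ncd", line=1, column=1)
#     4
#     >>> position_to_cursor("ab\ncd", 4)
#     (1, 1)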
1533 1522
1534 1523 def _safe_isinstance(obj, module, class_name, *attrs):
1535 1524 """Checks if obj is an instance of module.class_name if loaded
1536 1525 """
1537 1526 if module in sys.modules:
1538 1527 m = sys.modules[module]
1539 1528 for attr in [class_name, *attrs]:
1540 1529 m = getattr(m, attr)
1541 1530 return isinstance(obj, m)
1542 1531
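# Illustrative use of the helper above: ``_safe_isinstance(obj, "pandas",
# "DataFrame")`` performs the isinstance check only when pandas is already in
# ``sys.modules``; otherwise it falls through and returns None (falsy), so the
# completer avoids importing optional dependencies just to complete keys.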
1543 1532
1544 1533 @context_matcher()
1545 1534 def back_unicode_name_matcher(context: CompletionContext):
1546 1535 """Match Unicode characters back to Unicode name
1547 1536
1548 1537 Same as :any:`back_unicode_name_matches`, but adopted to new Matcher API.
1549 1538 """
1550 1539 fragment, matches = back_unicode_name_matches(context.text_until_cursor)
1551 1540 return _convert_matcher_v1_result_to_v2(
1552 1541 matches, type="unicode", fragment=fragment, suppress_if_matches=True
1553 1542 )
1554 1543
1555 1544
1556 1545 def back_unicode_name_matches(text: str) -> Tuple[str, Sequence[str]]:
1557 1546 """Match Unicode characters back to Unicode name
1558 1547
1559 1548 This does ``☃`` -> ``\\snowman``
1560 1549
1561 1550 Note that snowman is not a valid python3 combining character, but it will still be expanded;
1562 1551 it just will not be recombined back into the snowman character by the completion machinery.
1563 1552 
1564 1553 Nor will this back-complete standard escape sequences like \\n, \\b, ...
1565 1554
1566 1555 .. deprecated:: 8.6
1567 1556 You can use :meth:`back_unicode_name_matcher` instead.
1568 1557
1569 1558 Returns
1570 1559 =======
1571 1560
1572 1561 Return a tuple with two elements:
1573 1562
1574 1563 - The Unicode character that was matched (preceded with a backslash), or
1575 1564 empty string,
1576 1565 - a sequence of one element: the name of the matched Unicode character,
1577 1566 preceded by a backslash, or an empty sequence if there is no match.
1578 1567 """
1579 1568 if len(text)<2:
1580 1569 return '', ()
1581 1570 maybe_slash = text[-2]
1582 1571 if maybe_slash != '\\':
1583 1572 return '', ()
1584 1573
1585 1574 char = text[-1]
1586 1575 # no expand on quote for completion in strings.
1587 1576 # nor backcomplete standard ascii keys
1588 1577 if char in string.ascii_letters or char in ('"',"'"):
1589 1578 return '', ()
1590 1579 try :
1591 1580 unic = unicodedata.name(char)
1592 1581 return '\\'+char,('\\'+unic,)
1593 1582 except KeyError:
1594 1583 pass
1595 1584 return '', ()
1596 1585
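# Illustrative sketch of the function above (not a doctest):
#     back_unicode_name_matches("foo \\☃")
# returns roughly ("\\☃", ("\\SNOWMAN",)): the fragment to be replaced and a
# one-element sequence with the backslash-prefixed Unicode name.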
1597 1586
1598 1587 @context_matcher()
1599 1588 def back_latex_name_matcher(context: CompletionContext):
1600 1589 """Match latex characters back to unicode name
1601 1590
1602 1591 Same as :any:`back_latex_name_matches`, but adopted to new Matcher API.
1603 1592 """
1604 1593 fragment, matches = back_latex_name_matches(context.text_until_cursor)
1605 1594 return _convert_matcher_v1_result_to_v2(
1606 1595 matches, type="latex", fragment=fragment, suppress_if_matches=True
1607 1596 )
1608 1597
1609 1598
1610 1599 def back_latex_name_matches(text: str) -> Tuple[str, Sequence[str]]:
1611 1600 """Match latex characters back to unicode name
1612 1601
1613 1602 This does ``\\ℵ`` -> ``\\aleph``
1614 1603
1615 1604 .. deprecated:: 8.6
1616 1605 You can use :meth:`back_latex_name_matcher` instead.
1617 1606 """
1618 1607 if len(text)<2:
1619 1608 return '', ()
1620 1609 maybe_slash = text[-2]
1621 1610 if maybe_slash != '\\':
1622 1611 return '', ()
1623 1612
1624 1613
1625 1614 char = text[-1]
1626 1615 # no expand on quote for completion in strings.
1627 1616 # nor backcomplete standard ascii keys
1628 1617 if char in string.ascii_letters or char in ('"',"'"):
1629 1618 return '', ()
1630 1619 try :
1631 1620 latex = reverse_latex_symbol[char]
1632 1621 # '\\' replace the \ as well
1633 1622 return '\\'+char,[latex]
1634 1623 except KeyError:
1635 1624 pass
1636 1625 return '', ()
1637 1626
1638 1627
1639 1628 def _formatparamchildren(parameter) -> str:
1640 1629 """
1641 1630 Get parameter name and value from Jedi Private API
1642 1631
1643 1632 Jedi does not expose a simple way to get `param=value` from its API.
1644 1633
1645 1634 Parameters
1646 1635 ----------
1647 1636 parameter
1648 1637 Jedi's function `Param`
1649 1638
1650 1639 Returns
1651 1640 -------
1652 1641 A string like 'a', 'b=1', '*args', '**kwargs'
1653 1642
1654 1643 """
1655 1644 description = parameter.description
1656 1645 if not description.startswith('param '):
1657 1646 raise ValueError('Jedi function parameter description has changed format. '
1658 1647 'Expected "param ...", found %r.' % description)
1659 1648 return description[6:]
1660 1649
1661 1650 def _make_signature(completion)-> str:
1662 1651 """
1663 1652 Make the signature from a jedi completion
1664 1653
1665 1654 Parameters
1666 1655 ----------
1667 1656 completion : jedi.Completion
1668 1657 object does not complete a function type
1669 1658
1670 1659 Returns
1671 1660 -------
1672 1661 a string consisting of the function signature, with the parentheses but
1673 1662 without the function name. Example:
1674 1663 `(a, *args, b=1, **kwargs)`
1675 1664
1676 1665 """
1677 1666
1678 1667 # it looks like this might work on jedi 0.17
1679 1668 if hasattr(completion, 'get_signatures'):
1680 1669 signatures = completion.get_signatures()
1681 1670 if not signatures:
1682 1671 return '(?)'
1683 1672
1684 1673 c0 = completion.get_signatures()[0]
1685 1674 return '('+c0.to_string().split('(', maxsplit=1)[1]
1686 1675
1687 1676 return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
1688 1677 for p in signature.defined_names()) if f])
1689 1678
1690 1679
1691 1680 _CompleteResult = Dict[str, MatcherResult]
1692 1681
1693 1682
1694 1683 DICT_MATCHER_REGEX = re.compile(
1695 1684 r"""(?x)
1696 1685 ( # match dict-referring - or any get item object - expression
1697 1686 .+
1698 1687 )
1699 1688 \[ # open bracket
1700 1689 \s* # and optional whitespace
1701 1690 # Capture any number of serializable objects (e.g. "a", "b", 'c')
1702 1691 # and slices
1703 1692 ((?:(?:
1704 1693 (?: # closed string
1705 1694 [uUbB]? # string prefix (r not handled)
1706 1695 (?:
1707 1696 '(?:[^']|(?<!\\)\\')*'
1708 1697 |
1709 1698 "(?:[^"]|(?<!\\)\\")*"
1710 1699 )
1711 1700 )
1712 1701 |
1713 1702 # capture integers and slices
1714 1703 (?:[-+]?\d+)?(?::(?:[-+]?\d+)?){0,2}
1715 1704 |
1716 1705 # integer in bin/hex/oct notation
1717 1706 0[bBxXoO]_?(?:\w|\d)+
1718 1707 )
1719 1708 \s*,\s*
1720 1709 )*)
1721 1710 ((?:
1722 1711 (?: # unclosed string
1723 1712 [uUbB]? # string prefix (r not handled)
1724 1713 (?:
1725 1714 '(?:[^']|(?<!\\)\\')*
1726 1715 |
1727 1716 "(?:[^"]|(?<!\\)\\")*
1728 1717 )
1729 1718 )
1730 1719 |
1731 1720 # unfinished integer
1732 1721 (?:[-+]?\d+)
1733 1722 |
1734 1723 # integer in bin/hex/oct notation
1735 1724 0[bBxXoO]_?(?:\w|\d)+
1736 1725 )
1737 1726 )?
1738 1727 $
1739 1728 """
1740 1729 )
1741 1730
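# Illustrative sketch of what the regular expression above captures: for the
# (hypothetical) buffer ``my_dict['a', 'b`` the three groups are roughly
# ("my_dict", "'a', ", "'b") -- the subscripted expression, any already-closed
# tuple keys, and the unfinished key prefix.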
1742 1731
1743 1732 def _convert_matcher_v1_result_to_v2(
1744 1733 matches: Sequence[str],
1745 1734 type: str,
1746 1735 fragment: Optional[str] = None,
1747 1736 suppress_if_matches: bool = False,
1748 1737 ) -> SimpleMatcherResult:
1749 1738 """Utility to help with transition"""
1750 1739 result = {
1751 1740 "completions": [SimpleCompletion(text=match, type=type) for match in matches],
1752 1741 "suppress": (True if matches else False) if suppress_if_matches else False,
1753 1742 }
1754 1743 if fragment is not None:
1755 1744 result["matched_fragment"] = fragment
1756 1745 return cast(SimpleMatcherResult, result)
1757 1746
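# Illustrative sketch of the adapter above (hypothetical values):
#     _convert_matcher_v1_result_to_v2(["%time", "%timeit"], type="magic", fragment="%t")
# would return roughly:
#     {"completions": [SimpleCompletion(text="%time", type="magic"),
#                      SimpleCompletion(text="%timeit", type="magic")],
#      "suppress": False,
#      "matched_fragment": "%t"}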
1758 1747
1759 1748 class IPCompleter(Completer):
1760 1749 """Extension of the completer class with IPython-specific features"""
1761 1750
1762 1751 @observe('greedy')
1763 1752 def _greedy_changed(self, change):
1764 1753 """update the splitter and readline delims when greedy is changed"""
1765 1754 if change["new"]:
1766 1755 self.evaluation = "unsafe"
1767 1756 self.auto_close_dict_keys = True
1768 1757 self.splitter.delims = GREEDY_DELIMS
1769 1758 else:
1770 1759 self.evaluation = "limited"
1771 1760 self.auto_close_dict_keys = False
1772 1761 self.splitter.delims = DELIMS
1773 1762
1774 1763 dict_keys_only = Bool(
1775 1764 False,
1776 1765 help="""
1777 1766 Whether to show dict key matches only.
1778 1767
1779 1768 (disables all matchers except for `IPCompleter.dict_key_matcher`).
1780 1769 """,
1781 1770 )
1782 1771
1783 1772 suppress_competing_matchers = UnionTrait(
1784 1773 [Bool(allow_none=True), DictTrait(Bool(None, allow_none=True))],
1785 1774 default_value=None,
1786 1775 help="""
1787 1776 Whether to suppress completions from other *Matchers*.
1788 1777
1789 1778 When set to ``None`` (default) the matchers will attempt to auto-detect
1790 1779 whether suppression of other matchers is desirable. For example, at
1791 1780 the beginning of a line followed by `%` we expect a magic completion
1792 1781 to be the only applicable option, and after ``my_dict['`` we usually
1793 1782 expect a completion with an existing dictionary key.
1794 1783
1795 1784 If you want to disable this heuristic and see completions from all matchers,
1796 1785 set ``IPCompleter.suppress_competing_matchers = False``.
1797 1786 To disable the heuristic for specific matchers provide a dictionary mapping:
1798 1787 ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher': False}``.
1799 1788
1800 1789 Set ``IPCompleter.suppress_competing_matchers = True`` to limit
1801 1790 completions to the set of matchers with the highest priority;
1802 1791 this is equivalent to setting ``IPCompleter.merge_completions = False`` and
1803 1792 can be beneficial for performance, but will sometimes omit relevant
1804 1793 candidates from matchers further down the priority list.
1805 1794 """,
1806 1795 ).tag(config=True)
1807 1796
1808 1797 merge_completions = Bool(
1809 1798 True,
1810 1799 help="""Whether to merge completion results into a single list
1811 1800
1812 1801 If False, only the completion results from the first non-empty
1813 1802 completer will be returned.
1814 1803
1815 1804 As of version 8.6.0, setting the value to ``False`` is an alias for:
1816 1805 ``IPCompleter.suppress_competing_matchers = True``.
1817 1806 """,
1818 1807 ).tag(config=True)
1819 1808
1820 1809 disable_matchers = ListTrait(
1821 1810 Unicode(),
1822 1811 help="""List of matchers to disable.
1823 1812
1824 1813 The list should contain matcher identifiers (see :any:`completion_matcher`).
1825 1814 """,
1826 1815 ).tag(config=True)
1827 1816
1828 1817 omit__names = Enum(
1829 1818 (0, 1, 2),
1830 1819 default_value=2,
1831 1820 help="""Instruct the completer to omit private method names
1832 1821
1833 1822 Specifically, when completing on ``object.<tab>``.
1834 1823
1835 1824 When 2 [default]: all names that start with '_' will be excluded.
1836 1825
1837 1826 When 1: all 'magic' names (``__foo__``) will be excluded.
1838 1827
1839 1828 When 0: nothing will be excluded.
1840 1829 """
1841 1830 ).tag(config=True)
1842 1831 limit_to__all__ = Bool(False,
1843 1832 help="""
1844 1833 DEPRECATED as of version 5.0.
1845 1834
1846 1835 Instruct the completer to use __all__ for the completion
1847 1836
1848 1837 Specifically, when completing on ``object.<tab>``.
1849 1838
1850 1839 When True: only those names in obj.__all__ will be included.
1851 1840
1852 1841 When False [default]: the __all__ attribute is ignored
1853 1842 """,
1854 1843 ).tag(config=True)
1855 1844
1856 1845 profile_completions = Bool(
1857 1846 default_value=False,
1858 1847 help="If True, emit profiling data for completion subsystem using cProfile."
1859 1848 ).tag(config=True)
1860 1849
1861 1850 profiler_output_dir = Unicode(
1862 1851 default_value=".completion_profiles",
1863 1852 help="Template for path at which to output profile data for completions."
1864 1853 ).tag(config=True)
1865 1854
1866 1855 @observe('limit_to__all__')
1867 1856 def _limit_to_all_changed(self, change):
1868 1857 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
1869 1858 'value has been deprecated since IPython 5.0, will be made to have '
1870 1859 'no effect and then removed in a future version of IPython.',
1871 1860 UserWarning)
1872 1861
1873 1862 def __init__(
1874 1863 self, shell=None, namespace=None, global_namespace=None, config=None, **kwargs
1875 1864 ):
1876 1865 """IPCompleter() -> completer
1877 1866
1878 1867 Return a completer object.
1879 1868
1880 1869 Parameters
1881 1870 ----------
1882 1871 shell
1883 1872 a pointer to the ipython shell itself. This is needed
1884 1873 because this completer knows about magic functions, and those can
1885 1874 only be accessed via the ipython instance.
1886 1875 namespace : dict, optional
1887 1876 an optional dict where completions are performed.
1888 1877 global_namespace : dict, optional
1889 1878 secondary optional dict for completions, to
1890 1879 handle cases (such as IPython embedded inside functions) where
1891 1880 both Python scopes are visible.
1892 1881 config : Config
1893 1882 traitlets config object
1894 1883 **kwargs
1895 1884 passed to super class unmodified.
1896 1885 """
1897 1886
1898 1887 self.magic_escape = ESC_MAGIC
1899 1888 self.splitter = CompletionSplitter()
1900 1889
1901 1890 # _greedy_changed() depends on splitter and readline being defined:
1902 1891 super().__init__(
1903 1892 namespace=namespace,
1904 1893 global_namespace=global_namespace,
1905 1894 config=config,
1906 1895 **kwargs,
1907 1896 )
1908 1897
1909 1898 # List where completion matches will be stored
1910 1899 self.matches = []
1911 1900 self.shell = shell
1912 1901 # Regexp to split filenames with spaces in them
1913 1902 self.space_name_re = re.compile(r'([^\\] )')
1914 1903 # Hold a local ref. to glob.glob for speed
1915 1904 self.glob = glob.glob
1916 1905
1917 1906 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1918 1907 # buffers, to avoid completion problems.
1919 1908 term = os.environ.get('TERM','xterm')
1920 1909 self.dumb_terminal = term in ['dumb','emacs']
1921 1910
1922 1911 # Special handling of backslashes needed in win32 platforms
1923 1912 if sys.platform == "win32":
1924 1913 self.clean_glob = self._clean_glob_win32
1925 1914 else:
1926 1915 self.clean_glob = self._clean_glob
1927 1916
1928 1917 #regexp to parse docstring for function signature
1929 1918 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1930 1919 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1931 1920 #use this if positional argument name is also needed
1932 1921 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1933 1922
1934 1923 self.magic_arg_matchers = [
1935 1924 self.magic_config_matcher,
1936 1925 self.magic_color_matcher,
1937 1926 ]
1938 1927
1939 1928 # This is set externally by InteractiveShell
1940 1929 self.custom_completers = None
1941 1930
1942 1931 # This is a list of names of unicode characters that can be completed
1943 1932 # into their corresponding unicode value. The list is large, so we
1944 1933 # lazily initialize it on first use. Consuming code should access this
1945 1934 # attribute through the `@unicode_names` property.
1946 1935 self._unicode_names = None
1947 1936
1948 1937 self._backslash_combining_matchers = [
1949 1938 self.latex_name_matcher,
1950 1939 self.unicode_name_matcher,
1951 1940 back_latex_name_matcher,
1952 1941 back_unicode_name_matcher,
1953 1942 self.fwd_unicode_matcher,
1954 1943 ]
1955 1944
1956 1945 if not self.backslash_combining_completions:
1957 1946 for matcher in self._backslash_combining_matchers:
1958 1947 self.disable_matchers.append(_get_matcher_id(matcher))
1959 1948
1960 1949 if not self.merge_completions:
1961 1950 self.suppress_competing_matchers = True
1962 1951
1963 1952 @property
1964 1953 def matchers(self) -> List[Matcher]:
1965 1954 """All active matcher routines for completion"""
1966 1955 if self.dict_keys_only:
1967 1956 return [self.dict_key_matcher]
1968 1957
1969 1958 if self.use_jedi:
1970 1959 return [
1971 1960 *self.custom_matchers,
1972 1961 *self._backslash_combining_matchers,
1973 1962 *self.magic_arg_matchers,
1974 1963 self.custom_completer_matcher,
1975 1964 self.magic_matcher,
1976 1965 self._jedi_matcher,
1977 1966 self.dict_key_matcher,
1978 1967 self.file_matcher,
1979 1968 ]
1980 1969 else:
1981 1970 return [
1982 1971 *self.custom_matchers,
1983 1972 *self._backslash_combining_matchers,
1984 1973 *self.magic_arg_matchers,
1985 1974 self.custom_completer_matcher,
1986 1975 self.dict_key_matcher,
1987 1976 self.magic_matcher,
1988 1977 self.python_matcher,
1989 1978 self.file_matcher,
1990 1979 self.python_func_kw_matcher,
1991 1980 ]
1992 1981
1993 1982 def all_completions(self, text:str) -> List[str]:
1994 1983 """
1995 1984 Wrapper around the completion methods for the benefit of emacs.
1996 1985 """
1997 1986 prefix = text.rpartition('.')[0]
1998 1987 with provisionalcompleter():
1999 1988 return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
2000 1989 for c in self.completions(text, len(text))]
2001 1990
2002 1991 return self.complete(text)[1]
2003 1992
2004 1993 def _clean_glob(self, text:str):
2005 1994 return self.glob("%s*" % text)
2006 1995
2007 1996 def _clean_glob_win32(self, text:str):
2008 1997 return [f.replace("\\","/")
2009 1998 for f in self.glob("%s*" % text)]
2010 1999
2011 2000 @context_matcher()
2012 2001 def file_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2013 2002 """Same as :any:`file_matches`, but adopted to new Matcher API."""
2014 2003 matches = self.file_matches(context.token)
2015 2004 # TODO: add a heuristic for suppressing (e.g. if it has OS-specific delimiter,
2016 2005 # starts with `/home/`, `C:\`, etc)
2017 2006 return _convert_matcher_v1_result_to_v2(matches, type="path")
2018 2007
2019 2008 def file_matches(self, text: str) -> List[str]:
2020 2009 """Match filenames, expanding ~USER type strings.
2021 2010
2022 2011 Most of the seemingly convoluted logic in this completer is an
2023 2012 attempt to handle filenames with spaces in them. And yet it's not
2024 2013 quite perfect, because Python's readline doesn't expose all of the
2025 2014 GNU readline details needed for this to be done correctly.
2026 2015
2027 2016 For a filename with a space in it, the printed completions will be
2028 2017 only the parts after what's already been typed (instead of the
2029 2018 full completions, as is normally done). I don't think with the
2030 2019 current (as of Python 2.3) Python readline it's possible to do
2031 2020 better.
2032 2021
2033 2022 .. deprecated:: 8.6
2034 2023 You can use :meth:`file_matcher` instead.
2035 2024 """
2036 2025
2037 2026 # chars that require escaping with backslash - i.e. chars
2038 2027 # that readline treats incorrectly as delimiters, but we
2039 2028 # don't want to treat as delimiters in filename matching
2040 2029 # when escaped with backslash
2041 2030 if text.startswith('!'):
2042 2031 text = text[1:]
2043 2032 text_prefix = u'!'
2044 2033 else:
2045 2034 text_prefix = u''
2046 2035
2047 2036 text_until_cursor = self.text_until_cursor
2048 2037 # track strings with open quotes
2049 2038 open_quotes = has_open_quotes(text_until_cursor)
2050 2039
2051 2040 if '(' in text_until_cursor or '[' in text_until_cursor:
2052 2041 lsplit = text
2053 2042 else:
2054 2043 try:
2055 2044 # arg_split ~ shlex.split, but with unicode bugs fixed by us
2056 2045 lsplit = arg_split(text_until_cursor)[-1]
2057 2046 except ValueError:
2058 2047 # typically an unmatched ", or backslash without escaped char.
2059 2048 if open_quotes:
2060 2049 lsplit = text_until_cursor.split(open_quotes)[-1]
2061 2050 else:
2062 2051 return []
2063 2052 except IndexError:
2064 2053 # tab pressed on empty line
2065 2054 lsplit = ""
2066 2055
2067 2056 if not open_quotes and lsplit != protect_filename(lsplit):
2068 2057 # if protectables are found, do matching on the whole escaped name
2069 2058 has_protectables = True
2070 2059 text0,text = text,lsplit
2071 2060 else:
2072 2061 has_protectables = False
2073 2062 text = os.path.expanduser(text)
2074 2063
2075 2064 if text == "":
2076 2065 return [text_prefix + protect_filename(f) for f in self.glob("*")]
2077 2066
2078 2067 # Compute the matches from the filesystem
2079 2068 if sys.platform == 'win32':
2080 2069 m0 = self.clean_glob(text)
2081 2070 else:
2082 2071 m0 = self.clean_glob(text.replace('\\', ''))
2083 2072
2084 2073 if has_protectables:
2085 2074 # If we had protectables, we need to revert our changes to the
2086 2075 # beginning of filename so that we don't double-write the part
2087 2076 # of the filename we have so far
2088 2077 len_lsplit = len(lsplit)
2089 2078 matches = [text_prefix + text0 +
2090 2079 protect_filename(f[len_lsplit:]) for f in m0]
2091 2080 else:
2092 2081 if open_quotes:
2093 2082 # if we have a string with an open quote, we don't need to
2094 2083 # protect the names beyond the quote (and we _shouldn't_, as
2095 2084 # it would cause bugs when the filesystem call is made).
2096 2085 matches = m0 if sys.platform == "win32" else\
2097 2086 [protect_filename(f, open_quotes) for f in m0]
2098 2087 else:
2099 2088 matches = [text_prefix +
2100 2089 protect_filename(f) for f in m0]
2101 2090
2102 2091 # Mark directories in input list by appending '/' to their names.
2103 2092 return [x+'/' if os.path.isdir(x) else x for x in matches]
2104 2093
2105 2094 @context_matcher()
2106 2095 def magic_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2107 2096 """Match magics."""
2108 2097 text = context.token
2109 2098 matches = self.magic_matches(text)
2110 2099 result = _convert_matcher_v1_result_to_v2(matches, type="magic")
2111 2100 is_magic_prefix = len(text) > 0 and text[0] == "%"
2112 2101 result["suppress"] = is_magic_prefix and bool(result["completions"])
2113 2102 return result
2114 2103
2115 2104 def magic_matches(self, text: str):
2116 2105 """Match magics.
2117 2106
2118 2107 .. deprecated:: 8.6
2119 2108 You can use :meth:`magic_matcher` instead.
2120 2109 """
2121 2110 # Get all shell magics now rather than statically, so magics loaded at
2122 2111 # runtime show up too.
2123 2112 lsm = self.shell.magics_manager.lsmagic()
2124 2113 line_magics = lsm['line']
2125 2114 cell_magics = lsm['cell']
2126 2115 pre = self.magic_escape
2127 2116 pre2 = pre+pre
2128 2117
2129 2118 explicit_magic = text.startswith(pre)
2130 2119
2131 2120 # Completion logic:
2132 2121 # - user gives %%: only do cell magics
2133 2122 # - user gives %: do both line and cell magics
2134 2123 # - no prefix: do both
2135 2124 # In other words, line magics are skipped if the user gives %% explicitly
2136 2125 #
2137 2126 # We also exclude magics that match any currently visible names:
2138 2127 # https://github.com/ipython/ipython/issues/4877, unless the user has
2139 2128 # typed a %:
2140 2129 # https://github.com/ipython/ipython/issues/10754
2141 2130 bare_text = text.lstrip(pre)
2142 2131 global_matches = self.global_matches(bare_text)
2143 2132 if not explicit_magic:
2144 2133 def matches(magic):
2145 2134 """
2146 2135 Filter magics, in particular remove magics that match
2147 2136 a name present in global namespace.
2148 2137 """
2149 2138 return ( magic.startswith(bare_text) and
2150 2139 magic not in global_matches )
2151 2140 else:
2152 2141 def matches(magic):
2153 2142 return magic.startswith(bare_text)
2154 2143
2155 2144 comp = [ pre2+m for m in cell_magics if matches(m)]
2156 2145 if not text.startswith(pre2):
2157 2146 comp += [ pre+m for m in line_magics if matches(m)]
2158 2147
2159 2148 return comp
2160 2149
2161 2150 @context_matcher()
2162 2151 def magic_config_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2163 2152 """Match class names and attributes for %config magic."""
2164 2153 # NOTE: uses `line_buffer` equivalent for compatibility
2165 2154 matches = self.magic_config_matches(context.line_with_cursor)
2166 2155 return _convert_matcher_v1_result_to_v2(matches, type="param")
2167 2156
2168 2157 def magic_config_matches(self, text: str) -> List[str]:
2169 2158 """Match class names and attributes for %config magic.
2170 2159
2171 2160 .. deprecated:: 8.6
2172 2161 You can use :meth:`magic_config_matcher` instead.
2173 2162 """
2174 2163 texts = text.strip().split()
2175 2164
2176 2165 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
2177 2166 # get all configuration classes
2178 2167 classes = sorted(set([ c for c in self.shell.configurables
2179 2168 if c.__class__.class_traits(config=True)
2180 2169 ]), key=lambda x: x.__class__.__name__)
2181 2170 classnames = [ c.__class__.__name__ for c in classes ]
2182 2171
2183 2172 # return all classnames if config or %config is given
2184 2173 if len(texts) == 1:
2185 2174 return classnames
2186 2175
2187 2176 # match classname
2188 2177 classname_texts = texts[1].split('.')
2189 2178 classname = classname_texts[0]
2190 2179 classname_matches = [ c for c in classnames
2191 2180 if c.startswith(classname) ]
2192 2181
2193 2182 # return matched classes or the matched class with attributes
2194 2183 if texts[1].find('.') < 0:
2195 2184 return classname_matches
2196 2185 elif len(classname_matches) == 1 and \
2197 2186 classname_matches[0] == classname:
2198 2187 cls = classes[classnames.index(classname)].__class__
2199 2188 help = cls.class_get_help()
2200 2189 # strip leading '--' from cl-args:
2201 2190 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
2202 2191 return [ attr.split('=')[0]
2203 2192 for attr in help.strip().splitlines()
2204 2193 if attr.startswith(texts[1]) ]
2205 2194 return []
2206 2195
2207 2196 @context_matcher()
2208 2197 def magic_color_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2209 2198 """Match color schemes for %colors magic."""
2210 2199 # NOTE: uses `line_buffer` equivalent for compatibility
2211 2200 matches = self.magic_color_matches(context.line_with_cursor)
2212 2201 return _convert_matcher_v1_result_to_v2(matches, type="param")
2213 2202
2214 2203 def magic_color_matches(self, text: str) -> List[str]:
2215 2204 """Match color schemes for %colors magic.
2216 2205
2217 2206 .. deprecated:: 8.6
2218 2207 You can use :meth:`magic_color_matcher` instead.
2219 2208 """
2220 2209 texts = text.split()
2221 2210 if text.endswith(' '):
2222 2211 # .split() strips off the trailing whitespace. Add '' back
2223 2212 # so that: '%colors ' -> ['%colors', '']
2224 2213 texts.append('')
2225 2214
2226 2215 if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
2227 2216 prefix = texts[1]
2228 2217 return [ color for color in InspectColors.keys()
2229 2218 if color.startswith(prefix) ]
2230 2219 return []
2231 2220
2232 2221 @context_matcher(identifier="IPCompleter.jedi_matcher")
2233 2222 def _jedi_matcher(self, context: CompletionContext) -> _JediMatcherResult:
2234 2223 matches = self._jedi_matches(
2235 2224 cursor_column=context.cursor_position,
2236 2225 cursor_line=context.cursor_line,
2237 2226 text=context.full_text,
2238 2227 )
2239 2228 return {
2240 2229 "completions": matches,
2241 2230 # static analysis should not suppress other matchers
2242 2231 "suppress": False,
2243 2232 }
2244 2233
2245 2234 def _jedi_matches(
2246 2235 self, cursor_column: int, cursor_line: int, text: str
2247 2236 ) -> Iterator[_JediCompletionLike]:
2248 2237 """
2249 2238 Return a list of :any:`jedi.api.Completion`\\s objects from a ``text`` and
2250 2239 cursor position.
2251 2240
2252 2241 Parameters
2253 2242 ----------
2254 2243 cursor_column : int
2255 2244 column position of the cursor in ``text``, 0-indexed.
2256 2245 cursor_line : int
2257 2246 line position of the cursor in ``text``, 0-indexed
2258 2247 text : str
2259 2248 text to complete
2260 2249
2261 2250 Notes
2262 2251 -----
2263 2252 If ``IPCompleter.debug`` is ``True`` may return a :any:`_FakeJediCompletion`
2264 2253 object containing a string with the Jedi debug information attached.
2265 2254
2266 2255 .. deprecated:: 8.6
2267 2256 You can use :meth:`_jedi_matcher` instead.
2268 2257 """
2269 2258 namespaces = [self.namespace]
2270 2259 if self.global_namespace is not None:
2271 2260 namespaces.append(self.global_namespace)
2272 2261
2273 2262 completion_filter = lambda x:x
2274 2263 offset = cursor_to_position(text, cursor_line, cursor_column)
2275 2264 # filter output if we are completing for object members
2276 2265 if offset:
2277 2266 pre = text[offset-1]
2278 2267 if pre == '.':
2279 2268 if self.omit__names == 2:
2280 2269 completion_filter = lambda c:not c.name.startswith('_')
2281 2270 elif self.omit__names == 1:
2282 2271 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
2283 2272 elif self.omit__names == 0:
2284 2273 completion_filter = lambda x:x
2285 2274 else:
2286 2275 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
2287 2276
2288 2277 interpreter = jedi.Interpreter(text[:offset], namespaces)
2289 2278 try_jedi = True
2290 2279
2291 2280 try:
2292 2281 # find the first token in the current tree -- if it is a ' or " then we are in a string
2293 2282 completing_string = False
2294 2283 try:
2295 2284 first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
2296 2285 except StopIteration:
2297 2286 pass
2298 2287 else:
2299 2288 # note the value may be ', ", or it may also be ''' or """, or
2300 2289 # in some cases, """what/you/typed..., but all of these are
2301 2290 # strings.
2302 2291 completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
2303 2292
2304 2293 # if we are in a string jedi is likely not the right candidate for
2305 2294 # now. Skip it.
2306 2295 try_jedi = not completing_string
2307 2296 except Exception as e:
2308 2297 # many things can go wrong; we are using a private API, just don't crash.
2309 2298 if self.debug:
2310 2299 print("Error detecting if completing a non-finished string :", e, '|')
2311 2300
2312 2301 if not try_jedi:
2313 2302 return iter([])
2314 2303 try:
2315 2304 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
2316 2305 except Exception as e:
2317 2306 if self.debug:
2318 2307 return iter(
2319 2308 [
2320 2309 _FakeJediCompletion(
2321 2310 'Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""'
2322 2311 % (e)
2323 2312 )
2324 2313 ]
2325 2314 )
2326 2315 else:
2327 2316 return iter([])
2328 2317
2329 2318 @context_matcher()
2330 2319 def python_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2331 2320 """Match attributes or global python names"""
2332 2321 text = context.line_with_cursor
2333 2322 if "." in text:
2334 2323 try:
2335 2324 matches, fragment = self._attr_matches(text, include_prefix=False)
2336 2325 if text.endswith(".") and self.omit__names:
2337 2326 if self.omit__names == 1:
2338 2327 # true if txt is _not_ a __ name, false otherwise:
2339 2328 no__name = lambda txt: re.match(r".*\.__.*?__", txt) is None
2340 2329 else:
2341 2330 # true if txt is _not_ a _ name, false otherwise:
2342 2331 no__name = (
2343 2332 lambda txt: re.match(r"\._.*?", txt[txt.rindex(".") :])
2344 2333 is None
2345 2334 )
2346 2335 matches = filter(no__name, matches)
2347 2336 return _convert_matcher_v1_result_to_v2(
2348 2337 matches, type="attribute", fragment=fragment
2349 2338 )
2350 2339 except NameError:
2351 2340 # catches <undefined attributes>.<tab>
2352 2341 matches = []
2353 2342 return _convert_matcher_v1_result_to_v2(matches, type="attribute")
2354 2343 else:
2355 2344 matches = self.global_matches(context.token)
2356 2345 # TODO: maybe distinguish between functions, modules and just "variables"
2357 2346 return _convert_matcher_v1_result_to_v2(matches, type="variable")
2358 2347
2359 2348 @completion_matcher(api_version=1)
2360 2349 def python_matches(self, text: str) -> Iterable[str]:
2361 2350 """Match attributes or global python names.
2362 2351
2363 2352 .. deprecated:: 8.27
2364 2353 You can use :meth:`python_matcher` instead."""
2365 2354 if "." in text:
2366 2355 try:
2367 2356 matches = self.attr_matches(text)
2368 2357 if text.endswith('.') and self.omit__names:
2369 2358 if self.omit__names == 1:
2370 2359 # true if txt is _not_ a __ name, false otherwise:
2371 2360 no__name = (lambda txt:
2372 2361 re.match(r'.*\.__.*?__',txt) is None)
2373 2362 else:
2374 2363 # true if txt is _not_ a _ name, false otherwise:
2375 2364 no__name = (lambda txt:
2376 2365 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
2377 2366 matches = filter(no__name, matches)
2378 2367 except NameError:
2379 2368 # catches <undefined attributes>.<tab>
2380 2369 matches = []
2381 2370 else:
2382 2371 matches = self.global_matches(text)
2383 2372 return matches
2384 2373
2385 2374 def _default_arguments_from_docstring(self, doc):
2386 2375 """Parse the first line of docstring for call signature.
2387 2376
2388 2377 Docstring should be of the form 'min(iterable[, key=func])\n'.
2389 2378 It can also parse cython docstring of the form
2390 2379 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
2391 2380 """
2392 2381 if doc is None:
2393 2382 return []
2394 2383
2395 2384 # care only about the first line
2396 2385 line = doc.lstrip().splitlines()[0]
2397 2386
2398 2387 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
2399 2388 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
2400 2389 sig = self.docstring_sig_re.search(line)
2401 2390 if sig is None:
2402 2391 return []
2403 2392 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
2404 2393 sig = sig.groups()[0].split(',')
2405 2394 ret = []
2406 2395 for s in sig:
2407 2396 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
2408 2397 ret += self.docstring_kwd_re.findall(s)
2409 2398 return ret
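
    # Illustrative sketch of the docstring parsing above (hypothetical input,
    # not a doctest): ``self._default_arguments_from_docstring(
    # "min(iterable[, key=func])\n")`` would return roughly ``["key"]`` --
    # only keyword-style parameters (those followed by "=") are extracted
    # from the first signature-like line.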
2410 2399
2411 2400 def _default_arguments(self, obj):
2412 2401 """Return the list of default arguments of obj if it is callable,
2413 2402 or empty list otherwise."""
2414 2403 call_obj = obj
2415 2404 ret = []
2416 2405 if inspect.isbuiltin(obj):
2417 2406 pass
2418 2407 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
2419 2408 if inspect.isclass(obj):
2420 2409 #for cython embedsignature=True the constructor docstring
2421 2410 #belongs to the object itself not __init__
2422 2411 ret += self._default_arguments_from_docstring(
2423 2412 getattr(obj, '__doc__', ''))
2424 2413 # for classes, check for __init__,__new__
2425 2414 call_obj = (getattr(obj, '__init__', None) or
2426 2415 getattr(obj, '__new__', None))
2427 2416 # for all others, check if they are __call__able
2428 2417 elif hasattr(obj, '__call__'):
2429 2418 call_obj = obj.__call__
2430 2419 ret += self._default_arguments_from_docstring(
2431 2420 getattr(call_obj, '__doc__', ''))
2432 2421
2433 2422 _keeps = (inspect.Parameter.KEYWORD_ONLY,
2434 2423 inspect.Parameter.POSITIONAL_OR_KEYWORD)
2435 2424
2436 2425 try:
2437 2426 sig = inspect.signature(obj)
2438 2427 ret.extend(k for k, v in sig.parameters.items() if
2439 2428 v.kind in _keeps)
2440 2429 except ValueError:
2441 2430 pass
2442 2431
2443 2432 return list(set(ret))
2444 2433
2445 2434 @context_matcher()
2446 2435 def python_func_kw_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2447 2436 """Match named parameters (kwargs) of the last open function."""
2448 2437 matches = self.python_func_kw_matches(context.token)
2449 2438 return _convert_matcher_v1_result_to_v2(matches, type="param")
2450 2439
2451 2440 def python_func_kw_matches(self, text):
2452 2441 """Match named parameters (kwargs) of the last open function.
2453 2442
2454 2443 .. deprecated:: 8.6
2455 2444 You can use :meth:`python_func_kw_matcher` instead.
2456 2445 """
2457 2446
2458 2447 if "." in text: # a parameter cannot be dotted
2459 2448 return []
2460 2449 try: regexp = self.__funcParamsRegex
2461 2450 except AttributeError:
2462 2451 regexp = self.__funcParamsRegex = re.compile(r'''
2463 2452 '.*?(?<!\\)' | # single quoted strings or
2464 2453 ".*?(?<!\\)" | # double quoted strings or
2465 2454 \w+ | # identifier
2466 2455 \S # other characters
2467 2456 ''', re.VERBOSE | re.DOTALL)
2468 2457 # 1. find the nearest identifier that comes before an unclosed
2469 2458 # parenthesis before the cursor
2470 2459 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
2471 2460 tokens = regexp.findall(self.text_until_cursor)
2472 2461 iterTokens = reversed(tokens); openPar = 0
2473 2462
2474 2463 for token in iterTokens:
2475 2464 if token == ')':
2476 2465 openPar -= 1
2477 2466 elif token == '(':
2478 2467 openPar += 1
2479 2468 if openPar > 0:
2480 2469 # found the last unclosed parenthesis
2481 2470 break
2482 2471 else:
2483 2472 return []
2484 2473 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
2485 2474 ids = []
2486 2475 isId = re.compile(r'\w+$').match
2487 2476
2488 2477 while True:
2489 2478 try:
2490 2479 ids.append(next(iterTokens))
2491 2480 if not isId(ids[-1]):
2492 2481 ids.pop(); break
2493 2482 if not next(iterTokens) == '.':
2494 2483 break
2495 2484 except StopIteration:
2496 2485 break
2497 2486
2498 2487 # Find all named arguments already assigned to, so as to avoid suggesting
2499 2488 # them again
2500 2489 usedNamedArgs = set()
2501 2490 par_level = -1
2502 2491 for token, next_token in zip(tokens, tokens[1:]):
2503 2492 if token == '(':
2504 2493 par_level += 1
2505 2494 elif token == ')':
2506 2495 par_level -= 1
2507 2496
2508 2497 if par_level != 0:
2509 2498 continue
2510 2499
2511 2500 if next_token != '=':
2512 2501 continue
2513 2502
2514 2503 usedNamedArgs.add(token)
2515 2504
2516 2505 argMatches = []
2517 2506 try:
2518 2507 callableObj = '.'.join(ids[::-1])
2519 2508 namedArgs = self._default_arguments(eval(callableObj,
2520 2509 self.namespace))
2521 2510
2522 2511 # Remove used named arguments from the list, no need to show twice
2523 2512 for namedArg in set(namedArgs) - usedNamedArgs:
2524 2513 if namedArg.startswith(text):
2525 2514 argMatches.append("%s=" %namedArg)
2526 2515 except:
2527 2516 pass
2528 2517
2529 2518 return argMatches
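
    # Illustrative sketch (hypothetical user code): with ``def plot(x, label=None)``
    # defined in the user namespace, completing ``plot(x, la<tab>`` calls
    # ``python_func_kw_matches("la")``, which would suggest ``["label="]`` while
    # skipping keyword arguments that were already assigned earlier in the call.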
2530 2519
2531 2520 @staticmethod
2532 2521 def _get_keys(obj: Any) -> List[Any]:
2533 2522 # Objects can define their own completions by defining an
2534 2523 # _ipython_key_completions_() method.
2535 2524 method = get_real_method(obj, '_ipython_key_completions_')
2536 2525 if method is not None:
2537 2526 return method()
2538 2527
2539 2528 # Special case some common in-memory dict-like types
2540 2529 if isinstance(obj, dict) or _safe_isinstance(obj, "pandas", "DataFrame"):
2541 2530 try:
2542 2531 return list(obj.keys())
2543 2532 except Exception:
2544 2533 return []
2545 2534 elif _safe_isinstance(obj, "pandas", "core", "indexing", "_LocIndexer"):
2546 2535 try:
2547 2536 return list(obj.obj.keys())
2548 2537 except Exception:
2549 2538 return []
2550 2539 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
2551 2540 _safe_isinstance(obj, 'numpy', 'void'):
2552 2541 return obj.dtype.names or []
2553 2542 return []
2554 2543
2555 2544 @context_matcher()
2556 2545 def dict_key_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2557 2546 """Match string keys in a dictionary, after e.g. ``foo[``."""
2558 2547 matches = self.dict_key_matches(context.token)
2559 2548 return _convert_matcher_v1_result_to_v2(
2560 2549 matches, type="dict key", suppress_if_matches=True
2561 2550 )
2562 2551
2563 2552 def dict_key_matches(self, text: str) -> List[str]:
2564 2553 """Match string keys in a dictionary, after e.g. ``foo[``.
2565 2554
2566 2555 .. deprecated:: 8.6
2567 2556 You can use :meth:`dict_key_matcher` instead.
2568 2557 """
2569 2558
2570 2559 # Short-circuit on closed dictionary (regular expression would
2571 2560 # not match anyway, but would take quite a while).
2572 2561 if self.text_until_cursor.strip().endswith("]"):
2573 2562 return []
2574 2563
2575 2564 match = DICT_MATCHER_REGEX.search(self.text_until_cursor)
2576 2565
2577 2566 if match is None:
2578 2567 return []
2579 2568
2580 2569 expr, prior_tuple_keys, key_prefix = match.groups()
2581 2570
2582 2571 obj = self._evaluate_expr(expr)
2583 2572
2584 2573 if obj is not_found:
2585 2574 return []
2586 2575
2587 2576 keys = self._get_keys(obj)
2588 2577 if not keys:
2589 2578 return keys
2590 2579
2591 2580 tuple_prefix = guarded_eval(
2592 2581 prior_tuple_keys,
2593 2582 EvaluationContext(
2594 2583 globals=self.global_namespace,
2595 2584 locals=self.namespace,
2596 2585 evaluation=self.evaluation, # type: ignore
2597 2586 in_subscript=True,
2598 2587 ),
2599 2588 )
2600 2589
2601 2590 closing_quote, token_offset, matches = match_dict_keys(
2602 2591 keys, key_prefix, self.splitter.delims, extra_prefix=tuple_prefix
2603 2592 )
2604 2593 if not matches:
2605 2594 return []
2606 2595
2607 2596 # get the cursor position of
2608 2597 # - the text being completed
2609 2598 # - the start of the key text
2610 2599 # - the start of the completion
2611 2600 text_start = len(self.text_until_cursor) - len(text)
2612 2601 if key_prefix:
2613 2602 key_start = match.start(3)
2614 2603 completion_start = key_start + token_offset
2615 2604 else:
2616 2605 key_start = completion_start = match.end()
2617 2606
2618 2607 # grab the leading prefix, to make sure all completions start with `text`
2619 2608 if text_start > key_start:
2620 2609 leading = ''
2621 2610 else:
2622 2611 leading = text[text_start:completion_start]
2623 2612
2624 2613 # append closing quote and bracket as appropriate
2625 2614 # this is *not* appropriate if the opening quote or bracket is outside
2626 2615 # the text given to this method, e.g. `d["""a\nt
2627 2616 can_close_quote = False
2628 2617 can_close_bracket = False
2629 2618
2630 2619 continuation = self.line_buffer[len(self.text_until_cursor) :].strip()
2631 2620
2632 2621 if continuation.startswith(closing_quote):
2633 2622 # do not close if already closed, e.g. `d['a<tab>'`
2634 2623 continuation = continuation[len(closing_quote) :]
2635 2624 else:
2636 2625 can_close_quote = True
2637 2626
2638 2627 continuation = continuation.strip()
2639 2628
2640 2629 # e.g. `pandas.DataFrame` has different tuple indexer behaviour;
2641 2630 # handling it is out of scope, so let's avoid appending suffixes.
2642 2631 has_known_tuple_handling = isinstance(obj, dict)
2643 2632
2644 2633 can_close_bracket = (
2645 2634 not continuation.startswith("]") and self.auto_close_dict_keys
2646 2635 )
2647 2636 can_close_tuple_item = (
2648 2637 not continuation.startswith(",")
2649 2638 and has_known_tuple_handling
2650 2639 and self.auto_close_dict_keys
2651 2640 )
2652 2641 can_close_quote = can_close_quote and self.auto_close_dict_keys
2653 2642
2654 2643 # fast path if the closing quote should be appended but no suffix is allowed
2655 2644 if not can_close_quote and not can_close_bracket and closing_quote:
2656 2645 return [leading + k for k in matches]
2657 2646
2658 2647 results = []
2659 2648
2660 2649 end_of_tuple_or_item = _DictKeyState.END_OF_TUPLE | _DictKeyState.END_OF_ITEM
2661 2650
2662 2651 for k, state_flag in matches.items():
2663 2652 result = leading + k
2664 2653 if can_close_quote and closing_quote:
2665 2654 result += closing_quote
2666 2655
2667 2656 if state_flag == end_of_tuple_or_item:
2668 2657 # We do not know which suffix to add,
2669 2658 # e.g. both tuple item and string
2670 2659 # match this item.
2671 2660 pass
2672 2661
2673 2662 if state_flag in end_of_tuple_or_item and can_close_bracket:
2674 2663 result += "]"
2675 2664 if state_flag == _DictKeyState.IN_TUPLE and can_close_tuple_item:
2676 2665 result += ", "
2677 2666 results.append(result)
2678 2667 return results
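# Illustrative sketch (behaviour depends on configuration, e.g. ``auto_close_dict_keys``):
# with ``d = {'a': None, ('b', 0): None}`` in the user namespace, typing ``d['`` offers
# the keys ``a`` and ``b``; when ``auto_close_dict_keys`` is enabled the completion for
# ``a`` also appends the closing quote and bracket (``a']``), while ``b``, being the first
# element of a tuple key, appends ``', `` so the next tuple element can be completed.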
2679 2668
2680 2669 @context_matcher()
2681 2670 def unicode_name_matcher(self, context: CompletionContext):
2682 2671 """Same as :any:`unicode_name_matches`, but adopted to new Matcher API."""
2683 2672 fragment, matches = self.unicode_name_matches(context.text_until_cursor)
2684 2673 return _convert_matcher_v1_result_to_v2(
2685 2674 matches, type="unicode", fragment=fragment, suppress_if_matches=True
2686 2675 )
2687 2676
2688 2677 @staticmethod
2689 2678 def unicode_name_matches(text: str) -> Tuple[str, List[str]]:
2690 2679 """Match Latex-like syntax for unicode characters base
2691 2680 on the name of the character.
2692 2681
2693 2682 This does ``\\GREEK SMALL LETTER ETA`` -> ``η``
2694 2683
2695 2684 Works only on valid Python 3 identifiers, or on combining characters that
2696 2685 will combine to form a valid identifier.
2697 2686 """
2698 2687 slashpos = text.rfind('\\')
2699 2688 if slashpos > -1:
2700 2689 s = text[slashpos+1:]
2701 2690 try:
2702 2691 unic = unicodedata.lookup(s)
2703 2692 # allow combining chars
2704 2693 if ('a'+unic).isidentifier():
2705 2694 return '\\'+s,[unic]
2706 2695 except KeyError:
2707 2696 pass
2708 2697 return '', []
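# For illustration (names come from the Unicode database shipped with CPython):
# unicode_name_matches("\\GREEK SMALL LETTER ALPHA") returns
# ("\\GREEK SMALL LETTER ALPHA", ["α"]), while an incomplete name such as
# "\\GREEK SMALL LET" yields ("", []) because unicodedata.lookup raises KeyError.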
2709 2698
2710 2699 @context_matcher()
2711 2700 def latex_name_matcher(self, context: CompletionContext):
2712 2701 """Match Latex syntax for unicode characters.
2713 2702
2714 2703 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α``
2715 2704 """
2716 2705 fragment, matches = self.latex_matches(context.text_until_cursor)
2717 2706 return _convert_matcher_v1_result_to_v2(
2718 2707 matches, type="latex", fragment=fragment, suppress_if_matches=True
2719 2708 )
2720 2709
2721 2710 def latex_matches(self, text: str) -> Tuple[str, Sequence[str]]:
2722 2711 """Match Latex syntax for unicode characters.
2723 2712
2724 2713 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α``
2725 2714
2726 2715 .. deprecated:: 8.6
2727 2716 You can use :meth:`latex_name_matcher` instead.
2728 2717 """
2729 2718 slashpos = text.rfind('\\')
2730 2719 if slashpos > -1:
2731 2720 s = text[slashpos:]
2732 2721 if s in latex_symbols:
2733 2722 # Try to complete a full latex symbol to unicode
2734 2723 # \\alpha -> α
2735 2724 return s, [latex_symbols[s]]
2736 2725 else:
2737 2726 # If a user has partially typed a latex symbol, give them
2738 2727 # a full list of options \al -> [\aleph, \alpha]
2739 2728 matches = [k for k in latex_symbols if k.startswith(s)]
2740 2729 if matches:
2741 2730 return s, matches
2742 2731 return '', ()
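# For illustration, assuming the standard latex_symbols table:
#   latex_matches(r"\alp")   -> (r"\alp", [r"\alpha", ...])   # partial name: list the options
#   latex_matches(r"\alpha") -> (r"\alpha", ["α"])            # full symbol: return the unicode char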
2743 2732
2744 2733 @context_matcher()
2745 2734 def custom_completer_matcher(self, context):
2746 2735 """Dispatch custom completer.
2747 2736
2748 2737 If a match is found, suppresses all other matchers except for Jedi.
2749 2738 """
2750 2739 matches = self.dispatch_custom_completer(context.token) or []
2751 2740 result = _convert_matcher_v1_result_to_v2(
2752 2741 matches, type=_UNKNOWN_TYPE, suppress_if_matches=True
2753 2742 )
2754 2743 result["ordered"] = True
2755 2744 result["do_not_suppress"] = {_get_matcher_id(self._jedi_matcher)}
2756 2745 return result
2757 2746
2758 2747 def dispatch_custom_completer(self, text):
2759 2748 """
2760 2749 .. deprecated:: 8.6
2761 2750 You can use :meth:`custom_completer_matcher` instead.
2762 2751 """
2763 2752 if not self.custom_completers:
2764 2753 return
2765 2754
2766 2755 line = self.line_buffer
2767 2756 if not line.strip():
2768 2757 return None
2769 2758
2770 2759 # Create a little structure to pass all the relevant information about
2771 2760 # the current completion to any custom completer.
2772 2761 event = SimpleNamespace()
2773 2762 event.line = line
2774 2763 event.symbol = text
2775 2764 cmd = line.split(None,1)[0]
2776 2765 event.command = cmd
2777 2766 event.text_until_cursor = self.text_until_cursor
2778 2767
2779 2768 # for foo etc, try also to find completer for %foo
2780 2769 if not cmd.startswith(self.magic_escape):
2781 2770 try_magic = self.custom_completers.s_matches(
2782 2771 self.magic_escape + cmd)
2783 2772 else:
2784 2773 try_magic = []
2785 2774
2786 2775 for c in itertools.chain(self.custom_completers.s_matches(cmd),
2787 2776 try_magic,
2788 2777 self.custom_completers.flat_matches(self.text_until_cursor)):
2789 2778 try:
2790 2779 res = c(event)
2791 2780 if res:
2792 2781 # first, try case sensitive match
2793 2782 withcase = [r for r in res if r.startswith(text)]
2794 2783 if withcase:
2795 2784 return withcase
2796 2785 # if none, then case insensitive ones are ok too
2797 2786 text_low = text.lower()
2798 2787 return [r for r in res if r.lower().startswith(text_low)]
2799 2788 except TryNext:
2800 2789 pass
2801 2790 except KeyboardInterrupt:
2802 2791 """
2803 2792 If a custom completer takes too long,
2804 2793 let the keyboard interrupt abort it and return nothing.
2805 2794 """
2806 2795 break
2807 2796
2808 2797 return None
2809 2798
2810 2799 def completions(self, text: str, offset: int)->Iterator[Completion]:
2811 2800 """
2812 2801 Returns an iterator over the possible completions
2813 2802
2814 2803 .. warning::
2815 2804
2816 2805 Unstable
2817 2806
2818 2807 This function is unstable; the API may change without warning.
2819 2808 It will also raise unless used in the proper context manager.
2820 2809
2821 2810 Parameters
2822 2811 ----------
2823 2812 text : str
2824 2813 Full text of the current input, multi line string.
2825 2814 offset : int
2826 2815 Integer representing the position of the cursor in ``text``. Offset
2827 2816 is 0-based.
2828 2817
2829 2818 Yields
2830 2819 ------
2831 2820 Completion
2832 2821
2833 2822 Notes
2834 2823 -----
2835 2824 The cursor in a text can be seen either as being "in between"
2836 2825 characters or "on" a character, depending on the interface visible to
2837 2826 the user. For consistency, the cursor being "in between" characters X
2838 2827 and Y is equivalent to the cursor being "on" character Y, that is to say
2839 2828 the character the cursor is on is considered as being after the cursor.
2840 2829
2841 2830 Combining characters may span more than one position in the
2842 2831 text.
2843 2832
2844 2833 .. note::
2845 2834
2846 2835 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
2847 2836 fake Completion token to distinguish completions returned by Jedi
2848 2837 from the usual IPython completions.
2849 2838
2850 2839 .. note::
2851 2840
2852 2841 Completions are not completely deduplicated yet. If identical
2853 2842 completions are coming from different sources this function does not
2854 2843 ensure that each completion object will only be present once.
2855 2844 """
2856 2845 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
2857 2846 "It may change without warnings. "
2858 2847 "Use in corresponding context manager.",
2859 2848 category=ProvisionalCompleterWarning, stacklevel=2)
2860 2849
2861 2850 seen = set()
2862 2851 profiler:Optional[cProfile.Profile]
2863 2852 try:
2864 2853 if self.profile_completions:
2865 2854 import cProfile
2866 2855 profiler = cProfile.Profile()
2867 2856 profiler.enable()
2868 2857 else:
2869 2858 profiler = None
2870 2859
2871 2860 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
2872 2861 if c and (c in seen):
2873 2862 continue
2874 2863 yield c
2875 2864 seen.add(c)
2876 2865 except KeyboardInterrupt:
2877 2866 """if completions take too long and users send keyboard interrupt,
2878 2867 do not crash and return ASAP. """
2879 2868 pass
2880 2869 finally:
2881 2870 if profiler is not None:
2882 2871 profiler.disable()
2883 2872 ensure_dir_exists(self.profiler_output_dir)
2884 2873 output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
2885 2874 print("Writing profiler output to", output_path)
2886 2875 profiler.dump_stats(output_path)
2887 2876
2888 2877 def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
2889 2878 """
2890 2879 Core completion module. Same signature as :any:`completions`, with the
2891 2880 extra ``_timeout`` parameter (in seconds).
2892 2881
2893 2882 Computing Jedi's completion ``.type`` can be quite expensive (it is a
2894 2883 lazy property) and can require some warm-up, more warm-up than just
2895 2884 computing the ``name`` of a completion. The warm-up can be:
2896 2885
2897 2886 - Long warm-up the first time a module is encountered after
2898 2887 install/update: the parse/inference tree is actually built.
2899 2888 
2900 2889 - Shorter warm-up the first time the module is encountered in a session:
2901 2890 the tree is loaded from disk.
2902 2891
2903 2892 We don't want to block completions for tens of seconds, so we give the
2904 2893 completer a "budget" of ``_timeout`` seconds per invocation to compute
2905 2894 completion types; the completions that have not yet been computed will
2906 2895 be marked as "unknown" and will have a chance to be computed on the next
2907 2896 round as things get cached.
2908 2897
2909 2898 Keep in mind that Jedi is not the only thing processing the completion, so
2910 2899 keep the timeout short-ish: if we take more than 0.3 seconds we still
2911 2900 have lots of processing to do.
2912 2901
2913 2902 """
2914 2903 deadline = time.monotonic() + _timeout
2915 2904
2916 2905 before = full_text[:offset]
2917 2906 cursor_line, cursor_column = position_to_cursor(full_text, offset)
2918 2907
2919 2908 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2920 2909
2921 2910 def is_non_jedi_result(
2922 2911 result: MatcherResult, identifier: str
2923 2912 ) -> TypeGuard[SimpleMatcherResult]:
2924 2913 return identifier != jedi_matcher_id
2925 2914
2926 2915 results = self._complete(
2927 2916 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column
2928 2917 )
2929 2918
2930 2919 non_jedi_results: Dict[str, SimpleMatcherResult] = {
2931 2920 identifier: result
2932 2921 for identifier, result in results.items()
2933 2922 if is_non_jedi_result(result, identifier)
2934 2923 }
2935 2924
2936 2925 jedi_matches = (
2937 2926 cast(_JediMatcherResult, results[jedi_matcher_id])["completions"]
2938 2927 if jedi_matcher_id in results
2939 2928 else ()
2940 2929 )
2941 2930
2942 2931 iter_jm = iter(jedi_matches)
2943 2932 if _timeout:
2944 2933 for jm in iter_jm:
2945 2934 try:
2946 2935 type_ = jm.type
2947 2936 except Exception:
2948 2937 if self.debug:
2949 2938 print("Error in Jedi getting type of ", jm)
2950 2939 type_ = None
2951 2940 delta = len(jm.name_with_symbols) - len(jm.complete)
2952 2941 if type_ == 'function':
2953 2942 signature = _make_signature(jm)
2954 2943 else:
2955 2944 signature = ''
2956 2945 yield Completion(start=offset - delta,
2957 2946 end=offset,
2958 2947 text=jm.name_with_symbols,
2959 2948 type=type_,
2960 2949 signature=signature,
2961 2950 _origin='jedi')
2962 2951
2963 2952 if time.monotonic() > deadline:
2964 2953 break
2965 2954
2966 2955 for jm in iter_jm:
2967 2956 delta = len(jm.name_with_symbols) - len(jm.complete)
2968 2957 yield Completion(
2969 2958 start=offset - delta,
2970 2959 end=offset,
2971 2960 text=jm.name_with_symbols,
2972 2961 type=_UNKNOWN_TYPE, # don't compute type for speed
2973 2962 _origin="jedi",
2974 2963 signature="",
2975 2964 )
2976 2965
2977 2966 # TODO:
2978 2967 # Suppress this, right now just for debug.
2979 2968 if jedi_matches and non_jedi_results and self.debug:
2980 2969 some_start_offset = before.rfind(
2981 2970 next(iter(non_jedi_results.values()))["matched_fragment"]
2982 2971 )
2983 2972 yield Completion(
2984 2973 start=some_start_offset,
2985 2974 end=offset,
2986 2975 text="--jedi/ipython--",
2987 2976 _origin="debug",
2988 2977 type="none",
2989 2978 signature="",
2990 2979 )
2991 2980
2992 2981 ordered: List[Completion] = []
2993 2982 sortable: List[Completion] = []
2994 2983
2995 2984 for origin, result in non_jedi_results.items():
2996 2985 matched_text = result["matched_fragment"]
2997 2986 start_offset = before.rfind(matched_text)
2998 2987 is_ordered = result.get("ordered", False)
2999 2988 container = ordered if is_ordered else sortable
3000 2989
3001 2990 # I'm unsure if this is always true, so let's assert and see if it
3002 2991 # crashes
3003 2992 assert before.endswith(matched_text)
3004 2993
3005 2994 for simple_completion in result["completions"]:
3006 2995 completion = Completion(
3007 2996 start=start_offset,
3008 2997 end=offset,
3009 2998 text=simple_completion.text,
3010 2999 _origin=origin,
3011 3000 signature="",
3012 3001 type=simple_completion.type or _UNKNOWN_TYPE,
3013 3002 )
3014 3003 container.append(completion)
3015 3004
3016 3005 yield from list(self._deduplicate(ordered + self._sort(sortable)))[
3017 3006 :MATCHES_LIMIT
3018 3007 ]
3019 3008
3020 3009 def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
3021 3010 """Find completions for the given text and line context.
3022 3011
3023 3012 Note that both the text and the line_buffer are optional, but at least
3024 3013 one of them must be given.
3025 3014
3026 3015 Parameters
3027 3016 ----------
3028 3017 text : string, optional
3029 3018 Text to perform the completion on. If not given, the line buffer
3030 3019 is split using the instance's CompletionSplitter object.
3031 3020 line_buffer : string, optional
3032 3021 If not given, the completer attempts to obtain the current line
3033 3022 buffer via readline. This keyword allows clients which are
3034 3023 requesting text completions in non-readline contexts to inform
3035 3024 the completer of the entire text.
3036 3025 cursor_pos : int, optional
3037 3026 Index of the cursor in the full line buffer. Should be provided by
3038 3027 remote frontends where the kernel has no access to frontend state.
3039 3028
3040 3029 Returns
3041 3030 -------
3042 3031 Tuple of two items:
3043 3032 text : str
3044 3033 Text that was actually used in the completion.
3045 3034 matches : list
3046 3035 A list of completion matches.
3047 3036
3048 3037 Notes
3049 3038 -----
3050 3039 This API is likely to be deprecated and replaced by
3051 3040 :any:`IPCompleter.completions` in the future.
3052 3041
3053 3042 """
3054 3043 warnings.warn('`Completer.complete` is pending deprecation since '
3055 3044 'IPython 6.0 and will be replaced by `Completer.completions`.',
3056 3045 PendingDeprecationWarning)
3057 3046 # potential todo: fold the 3rd throwaway argument of _complete
3058 3047 # into the first 2.
3059 3048 # TODO: Q: does the above refer to jedi completions (i.e. 0-indexed?)
3060 3049 # TODO: should we deprecate now, or does it stay?
3061 3050
3062 3051 results = self._complete(
3063 3052 line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0
3064 3053 )
3065 3054
3066 3055 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
3067 3056
3068 3057 return self._arrange_and_extract(
3069 3058 results,
3070 3059 # TODO: can we confirm that excluding Jedi here was a deliberate choice in the previous version?
3071 3060 skip_matchers={jedi_matcher_id},
3072 3061 # this API does not support different start/end positions (fragments of token).
3073 3062 abort_if_offset_changes=True,
3074 3063 )
3075 3064
3076 3065 def _arrange_and_extract(
3077 3066 self,
3078 3067 results: Dict[str, MatcherResult],
3079 3068 skip_matchers: Set[str],
3080 3069 abort_if_offset_changes: bool,
3081 3070 ):
3082 3071 sortable: List[AnyMatcherCompletion] = []
3083 3072 ordered: List[AnyMatcherCompletion] = []
3084 3073 most_recent_fragment = None
3085 3074 for identifier, result in results.items():
3086 3075 if identifier in skip_matchers:
3087 3076 continue
3088 3077 if not result["completions"]:
3089 3078 continue
3090 3079 if not most_recent_fragment:
3091 3080 most_recent_fragment = result["matched_fragment"]
3092 3081 if (
3093 3082 abort_if_offset_changes
3094 3083 and result["matched_fragment"] != most_recent_fragment
3095 3084 ):
3096 3085 break
3097 3086 if result.get("ordered", False):
3098 3087 ordered.extend(result["completions"])
3099 3088 else:
3100 3089 sortable.extend(result["completions"])
3101 3090
3102 3091 if not most_recent_fragment:
3103 3092 most_recent_fragment = "" # to satisfy typechecker (and just in case)
3104 3093
3105 3094 return most_recent_fragment, [
3106 3095 m.text for m in self._deduplicate(ordered + self._sort(sortable))
3107 3096 ]
3108 3097
3109 3098 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
3110 3099 full_text=None) -> _CompleteResult:
3111 3100 """
3112 3101 Like complete but can also return raw Jedi completions as well as the
3113 3102 origin of the completion text. This could (and should) be made much
3114 3103 cleaner, but that will be simpler once we drop the old (and stateful)
3115 3104 :any:`complete` API.
3116 3105
3117 3106 With the current provisional API, cursor_pos acts (depending on the
3118 3107 caller) either as the offset in the ``text`` or ``line_buffer``, or as the
3119 3108 ``column`` when passing multiline strings; this could/should be renamed
3120 3109 but would add extra noise.
3121 3110
3122 3111 Parameters
3123 3112 ----------
3124 3113 cursor_line
3125 3114 Index of the line the cursor is on. 0 indexed.
3126 3115 cursor_pos
3127 3116 Position of the cursor in the current line/line_buffer/text. 0
3128 3117 indexed.
3129 3118 line_buffer : optional, str
3130 3119 The current line the cursor is in; this exists mostly for the legacy
3131 3120 reason that readline could only give us the single current line.
3132 3121 Prefer `full_text`.
3133 3122 text : str
3134 3123 The current "token" the cursor is in, also mostly for historical
3135 3124 reasons, as the completer would trigger only after the current line
3136 3125 was parsed.
3137 3126 full_text : str
3138 3127 Full text of the current cell.
3139 3128
3140 3129 Returns
3141 3130 -------
3142 3131 An ordered dictionary where keys are identifiers of completion
3143 3132 matchers and values are ``MatcherResult``s.
3144 3133 """
3145 3134
3146 3135 # if the cursor position isn't given, the only sane assumption we can
3147 3136 # make is that it's at the end of the line (the common case)
3148 3137 if cursor_pos is None:
3149 3138 cursor_pos = len(line_buffer) if text is None else len(text)
3150 3139
3151 3140 if self.use_main_ns:
3152 3141 self.namespace = __main__.__dict__
3153 3142
3154 3143 # if text is either None or an empty string, rely on the line buffer
3155 3144 if (not line_buffer) and full_text:
3156 3145 line_buffer = full_text.split('\n')[cursor_line]
3157 3146 if not text: # issue #11508: check line_buffer before calling split_line
3158 3147 text = (
3159 3148 self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ""
3160 3149 )
3161 3150
3162 3151 # If no line buffer is given, assume the input text is all there was
3163 3152 if line_buffer is None:
3164 3153 line_buffer = text
3165 3154
3166 3155 # deprecated - do not use `line_buffer` in new code.
3167 3156 self.line_buffer = line_buffer
3168 3157 self.text_until_cursor = self.line_buffer[:cursor_pos]
3169 3158
3170 3159 if not full_text:
3171 3160 full_text = line_buffer
3172 3161
3173 3162 context = CompletionContext(
3174 3163 full_text=full_text,
3175 3164 cursor_position=cursor_pos,
3176 3165 cursor_line=cursor_line,
3177 3166 token=text,
3178 3167 limit=MATCHES_LIMIT,
3179 3168 )
3180 3169
3181 3170 # Start with a clean slate of completions
3182 3171 results: Dict[str, MatcherResult] = {}
3183 3172
3184 3173 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
3185 3174
3186 3175 suppressed_matchers: Set[str] = set()
3187 3176
3188 3177 matchers = {
3189 3178 _get_matcher_id(matcher): matcher
3190 3179 for matcher in sorted(
3191 3180 self.matchers, key=_get_matcher_priority, reverse=True
3192 3181 )
3193 3182 }
3194 3183
3195 3184 for matcher_id, matcher in matchers.items():
3196 3185 matcher_id = _get_matcher_id(matcher)
3197 3186
3198 3187 if matcher_id in self.disable_matchers:
3199 3188 continue
3200 3189
3201 3190 if matcher_id in results:
3202 3191 warnings.warn(f"Duplicate matcher ID: {matcher_id}.")
3203 3192
3204 3193 if matcher_id in suppressed_matchers:
3205 3194 continue
3206 3195
3207 3196 result: MatcherResult
3208 3197 try:
3209 3198 if _is_matcher_v1(matcher):
3210 3199 result = _convert_matcher_v1_result_to_v2(
3211 3200 matcher(text), type=_UNKNOWN_TYPE
3212 3201 )
3213 3202 elif _is_matcher_v2(matcher):
3214 3203 result = matcher(context)
3215 3204 else:
3216 3205 api_version = _get_matcher_api_version(matcher)
3217 3206 raise ValueError(f"Unsupported API version {api_version}")
3218 3207 except:
3219 3208 # Show the ugly traceback if the matcher causes an
3220 3209 # exception, but do NOT crash the kernel!
3221 3210 sys.excepthook(*sys.exc_info())
3222 3211 continue
3223 3212
3224 3213 # set default value for matched fragment if suffix was not selected.
3225 3214 result["matched_fragment"] = result.get("matched_fragment", context.token)
3226 3215
3227 3216 if not suppressed_matchers:
3228 3217 suppression_recommended: Union[bool, Set[str]] = result.get(
3229 3218 "suppress", False
3230 3219 )
3231 3220
3232 3221 suppression_config = (
3233 3222 self.suppress_competing_matchers.get(matcher_id, None)
3234 3223 if isinstance(self.suppress_competing_matchers, dict)
3235 3224 else self.suppress_competing_matchers
3236 3225 )
3237 3226 should_suppress = (
3238 3227 (suppression_config is True)
3239 3228 or (suppression_recommended and (suppression_config is not False))
3240 3229 ) and has_any_completions(result)
3241 3230
3242 3231 if should_suppress:
3243 3232 suppression_exceptions: Set[str] = result.get(
3244 3233 "do_not_suppress", set()
3245 3234 )
3246 3235 if isinstance(suppression_recommended, Iterable):
3247 3236 to_suppress = set(suppression_recommended)
3248 3237 else:
3249 3238 to_suppress = set(matchers)
3250 3239 suppressed_matchers = to_suppress - suppression_exceptions
3251 3240
3252 3241 new_results = {}
3253 3242 for previous_matcher_id, previous_result in results.items():
3254 3243 if previous_matcher_id not in suppressed_matchers:
3255 3244 new_results[previous_matcher_id] = previous_result
3256 3245 results = new_results
3257 3246
3258 3247 results[matcher_id] = result
3259 3248
3260 3249 _, matches = self._arrange_and_extract(
3261 3250 results,
3262 3251 # TODO: Jedi completions are not included in the legacy stateful API; was this deliberate or an omission?
3263 3252 # If it was an omission, we can remove the filtering step; otherwise remove this comment.
3264 3253 skip_matchers={jedi_matcher_id},
3265 3254 abort_if_offset_changes=False,
3266 3255 )
3267 3256
3268 3257 # populate legacy stateful API
3269 3258 self.matches = matches
3270 3259
3271 3260 return results
3272 3261
3273 3262 @staticmethod
3274 3263 def _deduplicate(
3275 3264 matches: Sequence[AnyCompletion],
3276 3265 ) -> Iterable[AnyCompletion]:
3277 3266 filtered_matches: Dict[str, AnyCompletion] = {}
3278 3267 for match in matches:
3279 3268 text = match.text
3280 3269 if (
3281 3270 text not in filtered_matches
3282 3271 or filtered_matches[text].type == _UNKNOWN_TYPE
3283 3272 ):
3284 3273 filtered_matches[text] = match
3285 3274
3286 3275 return filtered_matches.values()
3287 3276
3288 3277 @staticmethod
3289 3278 def _sort(matches: Sequence[AnyCompletion]):
3290 3279 return sorted(matches, key=lambda x: completions_sorting_key(x.text))
3291 3280
3292 3281 @context_matcher()
3293 3282 def fwd_unicode_matcher(self, context: CompletionContext):
3294 3283 """Same as :any:`fwd_unicode_match`, but adopted to new Matcher API."""
3295 3284 # TODO: use `context.limit` to terminate early once we matched the maximum
3296 3285 # number that will be used downstream; can be added as an optional to
3297 3286 # `fwd_unicode_match(text: str, limit: int = None)` or we could re-implement here.
3298 3287 fragment, matches = self.fwd_unicode_match(context.text_until_cursor)
3299 3288 return _convert_matcher_v1_result_to_v2(
3300 3289 matches, type="unicode", fragment=fragment, suppress_if_matches=True
3301 3290 )
3302 3291
3303 3292 def fwd_unicode_match(self, text: str) -> Tuple[str, Sequence[str]]:
3304 3293 """
3305 3294 Forward-match a string starting with a backslash against a list of
3306 3295 potential Unicode completions.
3307 3296 
3308 3297 Will compute the list of Unicode character names on first call and cache it.
3309 3298
3310 3299 .. deprecated:: 8.6
3311 3300 You can use :meth:`fwd_unicode_matcher` instead.
3312 3301
3313 3302 Returns
3314 3303 -------
3315 3304 A tuple with:
3316 3305 - matched text (empty if no matches)
3317 3306 - list of potential completions (an empty tuple if there are none)
3318 3307 """
3319 3308 # TODO: self.unicode_names is a list of ~100k elements that we traverse on each call.
3320 3309 # We could do a faster match using a trie.
3321 3310
3322 3311 # Using pygtrie, the following seems to work:
3323 3312
3324 3313 # s = PrefixSet()
3325 3314
3326 3315 # for c in range(0,0x10FFFF + 1):
3327 3316 # try:
3328 3317 # s.add(unicodedata.name(chr(c)))
3329 3318 # except ValueError:
3330 3319 # pass
3331 3320 # [''.join(k) for k in s.iter(prefix)]
3332 3321
3333 3322 # But it would need to be timed, and it adds an extra dependency.
3334 3323
3335 3324 slashpos = text.rfind('\\')
3336 3325 # if text contains a backslash
3337 3326 if slashpos > -1:
3338 3327 # PERF: It's important that we don't access self._unicode_names
3339 3328 # until we're inside this if-block. _unicode_names is lazily
3340 3329 # initialized, and it takes a user-noticeable amount of time to
3341 3330 # initialize it, so we don't want to initialize it unless we're
3342 3331 # actually going to use it.
3343 3332 s = text[slashpos + 1 :]
3344 3333 sup = s.upper()
3345 3334 candidates = [x for x in self.unicode_names if x.startswith(sup)]
3346 3335 if candidates:
3347 3336 return s, candidates
3348 3337 candidates = [x for x in self.unicode_names if sup in x]
3349 3338 if candidates:
3350 3339 return s, candidates
3351 3340 splitsup = sup.split(" ")
3352 3341 candidates = [
3353 3342 x for x in self.unicode_names if all(u in x for u in splitsup)
3354 3343 ]
3355 3344 if candidates:
3356 3345 return s, candidates
3357 3346
3358 3347 return "", ()
3359 3348
3360 3349 # if text does not contain a backslash
3361 3350 else:
3362 3351 return '', ()
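# For illustration: fwd_unicode_match("\\GREEK SM") would return
# ("GREEK SM", [...]) with candidates such as "GREEK SMALL LETTER ALPHA";
# note that the matched fragment excludes the backslash, unlike unicode_name_matches.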
3363 3352
3364 3353 @property
3365 3354 def unicode_names(self) -> List[str]:
3366 3355 """List of names of unicode code points that can be completed.
3367 3356
3368 3357 The list is lazily initialized on first access.
3369 3358 """
3370 3359 if self._unicode_names is None:
3377 3366 self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
3378 3367
3379 3368 return self._unicode_names
3380 3369
3381 3370 def _unicode_name_compute(ranges: List[Tuple[int, int]]) -> List[str]:
3382 3371 names = []
3383 3372 for start, stop in ranges:
3384 3373 for c in range(start, stop):
3385 3374 try:
3386 3375 names.append(unicodedata.name(chr(c)))
3387 3376 except ValueError:
3388 3377 pass
3389 3378 return names
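For reference, a minimal sketch of driving the provisional completion API documented above from a running IPython session. It assumes an active shell (so that `get_ipython()` is available) and uses the `provisionalcompleter` context manager exported by `IPython.core.completer`, since `completions()` is provisional and must be used inside it; the sample code string is purely illustrative.

    from IPython.core.completer import provisionalcompleter

    ip = get_ipython()                      # assumes a running IPython shell
    code = "import collections; collections.Ord"
    with provisionalcompleter():            # required by the provisional API
        for completion in ip.Completer.completions(code, len(code)):
            # each Completion carries start/end offsets, text, and a best-effort type
            print(completion.start, completion.end, completion.text, completion.type)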
@@ -1,895 +1,892
1 1 from inspect import isclass, signature, Signature
2 2 from typing import (
3 3 Annotated,
4 4 AnyStr,
5 5 Callable,
6 6 Dict,
7 7 Literal,
8 8 NamedTuple,
9 9 NewType,
10 10 Optional,
11 11 Protocol,
12 12 Set,
13 13 Sequence,
14 14 Tuple,
15 15 Type,
16 16 TypeGuard,
17 17 Union,
18 18 get_args,
19 19 get_origin,
20 20 is_typeddict,
21 21 )
22 22 import ast
23 23 import builtins
24 24 import collections
25 25 import operator
26 26 import sys
27 27 from functools import cached_property
28 28 from dataclasses import dataclass, field
29 29 from types import MethodDescriptorType, ModuleType
30 30
31 31 from IPython.utils.decorators import undoc
32 32
33 33
34 if sys.version_info < (3, 11):
35 from typing_extensions import Self, LiteralString
36 else:
37 from typing import Self, LiteralString
34 from typing import Self, LiteralString
38 35
39 36 if sys.version_info < (3, 12):
40 37 from typing_extensions import TypeAliasType
41 38 else:
42 39 from typing import TypeAliasType
43 40
44 41
45 42 @undoc
46 43 class HasGetItem(Protocol):
47 44 def __getitem__(self, key) -> None: ...
48 45
49 46
50 47 @undoc
51 48 class InstancesHaveGetItem(Protocol):
52 49 def __call__(self, *args, **kwargs) -> HasGetItem: ...
53 50
54 51
55 52 @undoc
56 53 class HasGetAttr(Protocol):
57 54 def __getattr__(self, key) -> None: ...
58 55
59 56
60 57 @undoc
61 58 class DoesNotHaveGetAttr(Protocol):
62 59 pass
63 60
64 61
65 62 # By default `__getattr__` is not explicitly implemented on most objects
66 63 MayHaveGetattr = Union[HasGetAttr, DoesNotHaveGetAttr]
67 64
68 65
69 66 def _unbind_method(func: Callable) -> Union[Callable, None]:
70 67 """Get unbound method for given bound method.
71 68
72 69 Returns None if the unbound method cannot be obtained, or the method is already unbound.
73 70 """
74 71 owner = getattr(func, "__self__", None)
75 72 owner_class = type(owner)
76 73 name = getattr(func, "__name__", None)
77 74 instance_dict_overrides = getattr(owner, "__dict__", None)
78 75 if (
79 76 owner is not None
80 77 and name
81 78 and (
82 79 not instance_dict_overrides
83 80 or (instance_dict_overrides and name not in instance_dict_overrides)
84 81 )
85 82 ):
86 83 return getattr(owner_class, name)
87 84 return None
88 85
89 86
90 87 @undoc
91 88 @dataclass
92 89 class EvaluationPolicy:
93 90 """Definition of evaluation policy."""
94 91
95 92 allow_locals_access: bool = False
96 93 allow_globals_access: bool = False
97 94 allow_item_access: bool = False
98 95 allow_attr_access: bool = False
99 96 allow_builtins_access: bool = False
100 97 allow_all_operations: bool = False
101 98 allow_any_calls: bool = False
102 99 allowed_calls: Set[Callable] = field(default_factory=set)
103 100
104 101 def can_get_item(self, value, item):
105 102 return self.allow_item_access
106 103
107 104 def can_get_attr(self, value, attr):
108 105 return self.allow_attr_access
109 106
110 107 def can_operate(self, dunders: Tuple[str, ...], a, b=None):
111 108 if self.allow_all_operations:
112 109 return True
113 110
114 111 def can_call(self, func):
115 112 if self.allow_any_calls:
116 113 return True
117 114
118 115 if func in self.allowed_calls:
119 116 return True
120 117
121 118 owner_method = _unbind_method(func)
122 119
123 120 if owner_method and owner_method in self.allowed_calls:
124 121 return True
125 122
126 123
127 124 def _get_external(module_name: str, access_path: Sequence[str]):
128 125 """Get value from external module given a dotted access path.
129 126
130 127 Raises:
131 128 * `KeyError` if the module was removed or not found, and
132 129 * `AttributeError` if the access path does not match an exported object
133 130 """
134 131 member_type = sys.modules[module_name]
135 132 for attr in access_path:
136 133 member_type = getattr(member_type, attr)
137 134 return member_type
138 135
139 136
140 137 def _has_original_dunder_external(
141 138 value,
142 139 module_name: str,
143 140 access_path: Sequence[str],
144 141 method_name: str,
145 142 ):
146 143 if module_name not in sys.modules:
147 144 # LBYL (look before you leap), as it is faster
148 145 return False
149 146 try:
150 147 member_type = _get_external(module_name, access_path)
151 148 value_type = type(value)
152 149 if value_type == member_type:
153 150 return True
154 151 if method_name == "__getattribute__":
155 152 # we have to short-circuit here due to an unresolved issue in
156 153 # `isinstance` implementation: https://bugs.python.org/issue32683
157 154 return False
158 155 if isinstance(value, member_type):
159 156 method = getattr(value_type, method_name, None)
160 157 member_method = getattr(member_type, method_name, None)
161 158 if member_method == method:
162 159 return True
163 160 except (AttributeError, KeyError):
164 161 return False
165 162
166 163
167 164 def _has_original_dunder(
168 165 value, allowed_types, allowed_methods, allowed_external, method_name
169 166 ):
170 167 # note: Python ignores `__getattr__`/`__getitem__` on instances,
171 168 # we only need to check at class level
172 169 value_type = type(value)
173 170
174 171 # strict type check passes → no need to check method
175 172 if value_type in allowed_types:
176 173 return True
177 174
178 175 method = getattr(value_type, method_name, None)
179 176
180 177 if method is None:
181 178 return None
182 179
183 180 if method in allowed_methods:
184 181 return True
185 182
186 183 for module_name, *access_path in allowed_external:
187 184 if _has_original_dunder_external(value, module_name, access_path, method_name):
188 185 return True
189 186
190 187 return False
191 188
192 189
193 190 @undoc
194 191 @dataclass
195 192 class SelectivePolicy(EvaluationPolicy):
196 193 allowed_getitem: Set[InstancesHaveGetItem] = field(default_factory=set)
197 194 allowed_getitem_external: Set[Tuple[str, ...]] = field(default_factory=set)
198 195
199 196 allowed_getattr: Set[MayHaveGetattr] = field(default_factory=set)
200 197 allowed_getattr_external: Set[Tuple[str, ...]] = field(default_factory=set)
201 198
202 199 allowed_operations: Set = field(default_factory=set)
203 200 allowed_operations_external: Set[Tuple[str, ...]] = field(default_factory=set)
204 201
205 202 _operation_methods_cache: Dict[str, Set[Callable]] = field(
206 203 default_factory=dict, init=False
207 204 )
208 205
209 206 def can_get_attr(self, value, attr):
210 207 has_original_attribute = _has_original_dunder(
211 208 value,
212 209 allowed_types=self.allowed_getattr,
213 210 allowed_methods=self._getattribute_methods,
214 211 allowed_external=self.allowed_getattr_external,
215 212 method_name="__getattribute__",
216 213 )
217 214 has_original_attr = _has_original_dunder(
218 215 value,
219 216 allowed_types=self.allowed_getattr,
220 217 allowed_methods=self._getattr_methods,
221 218 allowed_external=self.allowed_getattr_external,
222 219 method_name="__getattr__",
223 220 )
224 221
225 222 accept = False
226 223
227 224 # Many objects do not have `__getattr__`, this is fine.
228 225 if has_original_attr is None and has_original_attribute:
229 226 accept = True
230 227 else:
231 228 # Accept objects without modifications to `__getattr__` and `__getattribute__`
232 229 accept = has_original_attr and has_original_attribute
233 230
234 231 if accept:
235 232 # We still need to check for overridden properties.
236 233
237 234 value_class = type(value)
238 235 if not hasattr(value_class, attr):
239 236 return True
240 237
241 238 class_attr_val = getattr(value_class, attr)
242 239 is_property = isinstance(class_attr_val, property)
243 240
244 241 if not is_property:
245 242 return True
246 243
247 244 # Properties in allowed types are ok (although we do not include any
248 245 # properties in our default allow list currently).
249 246 if type(value) in self.allowed_getattr:
250 247 return True # pragma: no cover
251 248
252 249 # Properties in subclasses of allowed types may be ok if not changed
253 250 for module_name, *access_path in self.allowed_getattr_external:
254 251 try:
255 252 external_class = _get_external(module_name, access_path)
256 253 external_class_attr_val = getattr(external_class, attr)
257 254 except (KeyError, AttributeError):
258 255 return False # pragma: no cover
259 256 return class_attr_val == external_class_attr_val
260 257
261 258 return False
262 259
263 260 def can_get_item(self, value, item):
264 261 """Allow accessing `__getiitem__` of allow-listed instances unless it was not modified."""
265 262 return _has_original_dunder(
266 263 value,
267 264 allowed_types=self.allowed_getitem,
268 265 allowed_methods=self._getitem_methods,
269 266 allowed_external=self.allowed_getitem_external,
270 267 method_name="__getitem__",
271 268 )
272 269
273 270 def can_operate(self, dunders: Tuple[str, ...], a, b=None):
274 271 objects = [a]
275 272 if b is not None:
276 273 objects.append(b)
277 274 return all(
278 275 [
279 276 _has_original_dunder(
280 277 obj,
281 278 allowed_types=self.allowed_operations,
282 279 allowed_methods=self._operator_dunder_methods(dunder),
283 280 allowed_external=self.allowed_operations_external,
284 281 method_name=dunder,
285 282 )
286 283 for dunder in dunders
287 284 for obj in objects
288 285 ]
289 286 )
290 287
291 288 def _operator_dunder_methods(self, dunder: str) -> Set[Callable]:
292 289 if dunder not in self._operation_methods_cache:
293 290 self._operation_methods_cache[dunder] = self._safe_get_methods(
294 291 self.allowed_operations, dunder
295 292 )
296 293 return self._operation_methods_cache[dunder]
297 294
298 295 @cached_property
299 296 def _getitem_methods(self) -> Set[Callable]:
300 297 return self._safe_get_methods(self.allowed_getitem, "__getitem__")
301 298
302 299 @cached_property
303 300 def _getattr_methods(self) -> Set[Callable]:
304 301 return self._safe_get_methods(self.allowed_getattr, "__getattr__")
305 302
306 303 @cached_property
307 304 def _getattribute_methods(self) -> Set[Callable]:
308 305 return self._safe_get_methods(self.allowed_getattr, "__getattribute__")
309 306
310 307 def _safe_get_methods(self, classes, name) -> Set[Callable]:
311 308 return {
312 309 method
313 310 for class_ in classes
314 311 for method in [getattr(class_, name, None)]
315 312 if method
316 313 }
317 314
318 315
319 316 class _DummyNamedTuple(NamedTuple):
320 317 """Used internally to retrieve methods of named tuple instance."""
321 318
322 319
323 320 class EvaluationContext(NamedTuple):
324 321 #: Local namespace
325 322 locals: dict
326 323 #: Global namespace
327 324 globals: dict
328 325 #: Evaluation policy identifier
329 326 evaluation: Literal["forbidden", "minimal", "limited", "unsafe", "dangerous"] = (
330 327 "forbidden"
331 328 )
332 329 #: Whether the evaluation of code takes place inside of a subscript.
333 330 #: Useful for evaluating ``:-1, 'col'`` in ``df[:-1, 'col']``.
334 331 in_subscript: bool = False
335 332
336 333
337 334 class _IdentitySubscript:
338 335 """Returns the key itself when item is requested via subscript."""
339 336
340 337 def __getitem__(self, key):
341 338 return key
342 339
343 340
344 341 IDENTITY_SUBSCRIPT = _IdentitySubscript()
345 342 SUBSCRIPT_MARKER = "__SUBSCRIPT_SENTINEL__"
346 343 UNKNOWN_SIGNATURE = Signature()
347 344 NOT_EVALUATED = object()
348 345
349 346
350 347 class GuardRejection(Exception):
351 348 """Exception raised when guard rejects evaluation attempt."""
352 349
353 350 pass
354 351
355 352
356 353 def guarded_eval(code: str, context: EvaluationContext):
357 354 """Evaluate provided code in the evaluation context.
358 355
359 356 If the evaluation policy given by the context is set to ``forbidden``,
360 357 no evaluation will be performed; if it is set to ``dangerous``,
361 358 the standard :func:`eval` will be used; finally, for any other
362 359 policy, :func:`eval_node` will be called on the parsed AST.
363 360 """
364 361 locals_ = context.locals
365 362
366 363 if context.evaluation == "forbidden":
367 364 raise GuardRejection("Forbidden mode")
368 365
369 366 # note: not using `ast.literal_eval` as it does not implement
370 367 # getitem at all; for example, it fails on a simple `[0][1]`
371 368
372 369 if context.in_subscript:
373 370 # the slice syntactic sugar (`:`) is only available in subscripts,
374 371 # so we need to trick the ast parser into thinking that we have
375 372 # a subscript, but we need to be able to later recognise that we did
376 373 # it so we can ignore the actual __getitem__ operation
377 374 if not code:
378 375 return tuple()
379 376 locals_ = locals_.copy()
380 377 locals_[SUBSCRIPT_MARKER] = IDENTITY_SUBSCRIPT
381 378 code = SUBSCRIPT_MARKER + "[" + code + "]"
382 379 context = EvaluationContext(**{**context._asdict(), **{"locals": locals_}})
383 380
384 381 if context.evaluation == "dangerous":
385 382 return eval(code, context.globals, context.locals)
386 383
387 384 expression = ast.parse(code, mode="eval")
388 385
389 386 return eval_node(expression, context)
390 387
391 388
392 389 BINARY_OP_DUNDERS: Dict[Type[ast.operator], Tuple[str]] = {
393 390 ast.Add: ("__add__",),
394 391 ast.Sub: ("__sub__",),
395 392 ast.Mult: ("__mul__",),
396 393 ast.Div: ("__truediv__",),
397 394 ast.FloorDiv: ("__floordiv__",),
398 395 ast.Mod: ("__mod__",),
399 396 ast.Pow: ("__pow__",),
400 397 ast.LShift: ("__lshift__",),
401 398 ast.RShift: ("__rshift__",),
402 399 ast.BitOr: ("__or__",),
403 400 ast.BitXor: ("__xor__",),
404 401 ast.BitAnd: ("__and__",),
405 402 ast.MatMult: ("__matmul__",),
406 403 }
407 404
408 405 COMP_OP_DUNDERS: Dict[Type[ast.cmpop], Tuple[str, ...]] = {
409 406 ast.Eq: ("__eq__",),
410 407 ast.NotEq: ("__ne__", "__eq__"),
411 408 ast.Lt: ("__lt__", "__gt__"),
412 409 ast.LtE: ("__le__", "__ge__"),
413 410 ast.Gt: ("__gt__", "__lt__"),
414 411 ast.GtE: ("__ge__", "__le__"),
415 412 ast.In: ("__contains__",),
416 413 # Note: ast.Is, ast.IsNot, ast.NotIn are handled specially
417 414 }
418 415
419 416 UNARY_OP_DUNDERS: Dict[Type[ast.unaryop], Tuple[str, ...]] = {
420 417 ast.USub: ("__neg__",),
421 418 ast.UAdd: ("__pos__",),
422 419 # we have to check both __inv__ and __invert__!
423 420 ast.Invert: ("__invert__", "__inv__"),
424 421 ast.Not: ("__not__",),
425 422 }
426 423
427 424
428 425 class ImpersonatingDuck:
429 426 """A dummy class used to create objects of other classes without calling their ``__init__``"""
430 427
431 428 # no-op: override __class__ to impersonate
432 429
433 430
434 431 class _Duck:
435 432 """A dummy class used to create objects pretending to have given attributes"""
436 433
437 434 def __init__(self, attributes: Optional[dict] = None, items: Optional[dict] = None):
438 435 self.attributes = attributes or {}
439 436 self.items = items or {}
440 437
441 438 def __getattr__(self, attr: str):
442 439 return self.attributes[attr]
443 440
444 441 def __hasattr__(self, attr: str):
445 442 return attr in self.attributes
446 443
447 444 def __dir__(self):
448 445 return [*dir(super), *self.attributes]
449 446
450 447 def __getitem__(self, key: str):
451 448 return self.items[key]
452 449
453 450 def __hasitem__(self, key: str):
454 451 return self.items[key]
455 452
456 453 def _ipython_key_completions_(self):
457 454 return self.items.keys()
458 455
459 456
460 457 def _find_dunder(node_op, dunders) -> Union[Tuple[str, ...], None]:
461 458 dunder = None
462 459 for op, candidate_dunder in dunders.items():
463 460 if isinstance(node_op, op):
464 461 dunder = candidate_dunder
465 462 return dunder
466 463
467 464
468 465 def eval_node(node: Union[ast.AST, None], context: EvaluationContext):
469 466 """Evaluate AST node in provided context.
470 467
471 468 Applies evaluation restrictions defined in the context. Currently does not support evaluation of functions with keyword arguments.
472 469
473 470 Does not evaluate actions that always have side effects:
474 471
475 472 - class definitions (``class sth: ...``)
476 473 - function definitions (``def sth(): ...``)
477 474 - variable assignments (``x = 1``)
478 475 - augmented assignments (``x += 1``)
479 476 - deletions (``del x``)
480 477
481 478 Does not evaluate operations which do not return values:
482 479
483 480 - assertions (``assert x``)
484 481 - pass (``pass``)
485 482 - imports (``import x``)
486 483 - control flow:
487 484
488 485 - conditionals (``if x:``) except for ternary IfExp (``a if x else b``)
489 486 - loops (``for`` and ``while``)
490 487 - exception handling
491 488
492 489 The purpose of this function is to guard against unwanted side-effects;
493 490 it does not give guarantees on protection from malicious code execution.
494 491 """
495 492 policy = EVALUATION_POLICIES[context.evaluation]
496 493 if node is None:
497 494 return None
498 495 if isinstance(node, ast.Expression):
499 496 return eval_node(node.body, context)
500 497 if isinstance(node, ast.BinOp):
501 498 left = eval_node(node.left, context)
502 499 right = eval_node(node.right, context)
503 500 dunders = _find_dunder(node.op, BINARY_OP_DUNDERS)
504 501 if dunders:
505 502 if policy.can_operate(dunders, left, right):
506 503 return getattr(left, dunders[0])(right)
507 504 else:
508 505 raise GuardRejection(
509 506 f"Operation (`{dunders}`) for",
510 507 type(left),
511 508 f"not allowed in {context.evaluation} mode",
512 509 )
513 510 if isinstance(node, ast.Compare):
514 511 left = eval_node(node.left, context)
515 512 all_true = True
516 513 negate = False
517 514 for op, right in zip(node.ops, node.comparators):
518 515 right = eval_node(right, context)
519 516 dunder = None
520 517 dunders = _find_dunder(op, COMP_OP_DUNDERS)
521 518 if not dunders:
522 519 if isinstance(op, ast.NotIn):
523 520 dunders = COMP_OP_DUNDERS[ast.In]
524 521 negate = True
525 522 if isinstance(op, ast.Is):
526 523 dunder = "is_"
527 524 if isinstance(op, ast.IsNot):
528 525 dunder = "is_"
529 526 negate = True
530 527 if not dunder and dunders:
531 528 dunder = dunders[0]
532 529 if dunder:
533 530 a, b = (right, left) if dunder == "__contains__" else (left, right)
534 531 if dunder == "is_" or dunders and policy.can_operate(dunders, a, b):
535 532 result = getattr(operator, dunder)(a, b)
536 533 if negate:
537 534 result = not result
538 535 if not result:
539 536 all_true = False
540 537 left = right
541 538 else:
542 539 raise GuardRejection(
543 540 f"Comparison (`{dunder}`) for",
544 541 type(left),
545 542 f"not allowed in {context.evaluation} mode",
546 543 )
547 544 else:
548 545 raise ValueError(
549 546 f"Comparison `{dunder}` not supported"
550 547 ) # pragma: no cover
551 548 return all_true
552 549 if isinstance(node, ast.Constant):
553 550 return node.value
554 551 if isinstance(node, ast.Tuple):
555 552 return tuple(eval_node(e, context) for e in node.elts)
556 553 if isinstance(node, ast.List):
557 554 return [eval_node(e, context) for e in node.elts]
558 555 if isinstance(node, ast.Set):
559 556 return {eval_node(e, context) for e in node.elts}
560 557 if isinstance(node, ast.Dict):
561 558 return dict(
562 559 zip(
563 560 [eval_node(k, context) for k in node.keys],
564 561 [eval_node(v, context) for v in node.values],
565 562 )
566 563 )
567 564 if isinstance(node, ast.Slice):
568 565 return slice(
569 566 eval_node(node.lower, context),
570 567 eval_node(node.upper, context),
571 568 eval_node(node.step, context),
572 569 )
573 570 if isinstance(node, ast.UnaryOp):
574 571 value = eval_node(node.operand, context)
575 572 dunders = _find_dunder(node.op, UNARY_OP_DUNDERS)
576 573 if dunders:
577 574 if policy.can_operate(dunders, value):
578 575 return getattr(value, dunders[0])()
579 576 else:
580 577 raise GuardRejection(
581 578 f"Operation (`{dunders}`) for",
582 579 type(value),
583 580 f"not allowed in {context.evaluation} mode",
584 581 )
585 582 if isinstance(node, ast.Subscript):
586 583 value = eval_node(node.value, context)
587 584 slice_ = eval_node(node.slice, context)
588 585 if policy.can_get_item(value, slice_):
589 586 return value[slice_]
590 587 raise GuardRejection(
591 588 "Subscript access (`__getitem__`) for",
592 589 type(value), # not joined to avoid calling `repr`
593 590 f" not allowed in {context.evaluation} mode",
594 591 )
595 592 if isinstance(node, ast.Name):
596 593 return _eval_node_name(node.id, context)
597 594 if isinstance(node, ast.Attribute):
598 595 value = eval_node(node.value, context)
599 596 if policy.can_get_attr(value, node.attr):
600 597 return getattr(value, node.attr)
601 598 raise GuardRejection(
602 599 "Attribute access (`__getattr__`) for",
603 600 type(value), # not joined to avoid calling `repr`
604 601 f"not allowed in {context.evaluation} mode",
605 602 )
606 603 if isinstance(node, ast.IfExp):
607 604 test = eval_node(node.test, context)
608 605 if test:
609 606 return eval_node(node.body, context)
610 607 else:
611 608 return eval_node(node.orelse, context)
612 609 if isinstance(node, ast.Call):
613 610 func = eval_node(node.func, context)
614 611 if policy.can_call(func) and not node.keywords:
615 612 args = [eval_node(arg, context) for arg in node.args]
616 613 return func(*args)
617 614 if isclass(func):
618 615 # this code path gets entered when calling a class, e.g. `MyClass()`
619 616 # or `my_instance.__class__()` - in both cases `func` is `MyClass`.
620 617 # Should return an instance of `MyClass` if `__new__` is not overridden,
621 618 # otherwise whatever `__new__`'s return type is.
622 619 overridden_return_type = _eval_return_type(func.__new__, node, context)
623 620 if overridden_return_type is not NOT_EVALUATED:
624 621 return overridden_return_type
625 622 return _create_duck_for_heap_type(func)
626 623 else:
627 624 return_type = _eval_return_type(func, node, context)
628 625 if return_type is not NOT_EVALUATED:
629 626 return return_type
630 627 raise GuardRejection(
631 628 "Call for",
632 629 func, # not joined to avoid calling `repr`
633 630 f"not allowed in {context.evaluation} mode",
634 631 )
635 632 raise ValueError("Unhandled node", ast.dump(node))
636 633
637 634
638 635 def _eval_return_type(func: Callable, node: ast.Call, context: EvaluationContext):
639 636 """Evaluate return type of a given callable function.
640 637
641 638 Returns the built-in type, a duck, or the NOT_EVALUATED sentinel.
642 639 """
643 640 try:
644 641 sig = signature(func)
645 642 except ValueError:
646 643 sig = UNKNOWN_SIGNATURE
647 644 # if the annotation was not stringized, or it was stringized
648 645 # but resolved by the signature call, we know the return type
649 646 not_empty = sig.return_annotation is not Signature.empty
650 647 if not_empty:
651 648 return _resolve_annotation(sig.return_annotation, sig, func, node, context)
652 649 return NOT_EVALUATED
653 650
654 651
655 652 def _resolve_annotation(
656 653 annotation,
657 654 sig: Signature,
658 655 func: Callable,
659 656 node: ast.Call,
660 657 context: EvaluationContext,
661 658 ):
662 659 """Resolve annotation created by user with `typing` module and custom objects."""
663 660 annotation = (
664 661 _eval_node_name(annotation, context)
665 662 if isinstance(annotation, str)
666 663 else annotation
667 664 )
668 665 origin = get_origin(annotation)
669 666 if annotation is Self and hasattr(func, "__self__"):
670 667 return func.__self__
671 668 elif origin is Literal:
672 669 type_args = get_args(annotation)
673 670 if len(type_args) == 1:
674 671 return type_args[0]
675 672 elif annotation is LiteralString:
676 673 return ""
677 674 elif annotation is AnyStr:
678 675 index = None
679 676 for i, (key, value) in enumerate(sig.parameters.items()):
680 677 if value.annotation is AnyStr:
681 678 index = i
682 679 break
683 680 if index is not None and index < len(node.args):
684 681 return eval_node(node.args[index], context)
685 682 elif origin is TypeGuard:
686 683 return bool()
687 684 elif origin is Union:
688 685 attributes = [
689 686 attr
690 687 for type_arg in get_args(annotation)
691 688 for attr in dir(_resolve_annotation(type_arg, sig, func, node, context))
692 689 ]
693 690 return _Duck(attributes=dict.fromkeys(attributes))
694 691 elif is_typeddict(annotation):
695 692 return _Duck(
696 693 attributes=dict.fromkeys(dir(dict())),
697 694 items={
698 695 k: _resolve_annotation(v, sig, func, node, context)
699 696 for k, v in annotation.__annotations__.items()
700 697 },
701 698 )
702 699 elif hasattr(annotation, "_is_protocol"):
703 700 return _Duck(attributes=dict.fromkeys(dir(annotation)))
704 701 elif origin is Annotated:
705 702 type_arg = get_args(annotation)[0]
706 703 return _resolve_annotation(type_arg, sig, func, node, context)
707 704 elif isinstance(annotation, NewType):
708 705 return _eval_or_create_duck(annotation.__supertype__, node, context)
709 706 elif isinstance(annotation, TypeAliasType):
710 707 return _eval_or_create_duck(annotation.__value__, node, context)
711 708 else:
712 709 return _eval_or_create_duck(annotation, node, context)
713 710
714 711
715 712 def _eval_node_name(node_id: str, context: EvaluationContext):
716 713 policy = EVALUATION_POLICIES[context.evaluation]
717 714 if policy.allow_locals_access and node_id in context.locals:
718 715 return context.locals[node_id]
719 716 if policy.allow_globals_access and node_id in context.globals:
720 717 return context.globals[node_id]
721 718 if policy.allow_builtins_access and hasattr(builtins, node_id):
722 719 # note: do not use __builtins__, it is an implementation detail of CPython
723 720 return getattr(builtins, node_id)
724 721 if not policy.allow_globals_access and not policy.allow_locals_access:
725 722 raise GuardRejection(
726 723 f"Namespace access not allowed in {context.evaluation} mode"
727 724 )
728 725 else:
729 726 raise NameError(f"{node_id} not found in locals, globals, nor builtins")
730 727
731 728
732 729 def _eval_or_create_duck(duck_type, node: ast.Call, context: EvaluationContext):
733 730 policy = EVALUATION_POLICIES[context.evaluation]
734 731 # if an allow-listed builtin is in the type annotation, instantiate it
735 732 if policy.can_call(duck_type) and not node.keywords:
736 733 args = [eval_node(arg, context) for arg in node.args]
737 734 return duck_type(*args)
738 735 # if a custom class is in the type annotation, mock it
739 736 return _create_duck_for_heap_type(duck_type)
740 737
741 738
742 739 def _create_duck_for_heap_type(duck_type):
743 740 """Create an imitation of an object of a given type (a duck).
744 741
745 742 Returns the duck, or the NOT_EVALUATED sentinel if the duck could not be created.
746 743 """
747 744 duck = ImpersonatingDuck()
748 745 try:
749 746 # this only works for heap types, not builtins
750 747 duck.__class__ = duck_type
751 748 return duck
752 749 except TypeError:
753 750 pass
754 751 return NOT_EVALUATED
755 752
756 753
757 754 SUPPORTED_EXTERNAL_GETITEM = {
758 755 ("pandas", "core", "indexing", "_iLocIndexer"),
759 756 ("pandas", "core", "indexing", "_LocIndexer"),
760 757 ("pandas", "DataFrame"),
761 758 ("pandas", "Series"),
762 759 ("numpy", "ndarray"),
763 760 ("numpy", "void"),
764 761 }
765 762
766 763
767 764 BUILTIN_GETITEM: Set[InstancesHaveGetItem] = {
768 765 dict,
769 766 str, # type: ignore[arg-type]
770 767 bytes, # type: ignore[arg-type]
771 768 list,
772 769 tuple,
773 770 collections.defaultdict,
774 771 collections.deque,
775 772 collections.OrderedDict,
776 773 collections.ChainMap,
777 774 collections.UserDict,
778 775 collections.UserList,
779 776 collections.UserString, # type: ignore[arg-type]
780 777 _DummyNamedTuple,
781 778 _IdentitySubscript,
782 779 }
783 780
784 781
785 782 def _list_methods(cls, source=None):
786 783 """For use on immutable objects or with methods returning a copy"""
787 784 return [getattr(cls, k) for k in (source if source else dir(cls))]
788 785
789 786
790 787 dict_non_mutating_methods = ("copy", "keys", "values", "items")
791 788 list_non_mutating_methods = ("copy", "index", "count")
792 789 set_non_mutating_methods = set(dir(set)) & set(dir(frozenset))
793 790
794 791
795 792 dict_keys: Type[collections.abc.KeysView] = type({}.keys())
796 793
797 794 NUMERICS = {int, float, complex}
798 795
799 796 ALLOWED_CALLS = {
800 797 bytes,
801 798 *_list_methods(bytes),
802 799 dict,
803 800 *_list_methods(dict, dict_non_mutating_methods),
804 801 dict_keys.isdisjoint,
805 802 list,
806 803 *_list_methods(list, list_non_mutating_methods),
807 804 set,
808 805 *_list_methods(set, set_non_mutating_methods),
809 806 frozenset,
810 807 *_list_methods(frozenset),
811 808 range,
812 809 str,
813 810 *_list_methods(str),
814 811 tuple,
815 812 *_list_methods(tuple),
816 813 *NUMERICS,
817 814 *[method for numeric_cls in NUMERICS for method in _list_methods(numeric_cls)],
818 815 collections.deque,
819 816 *_list_methods(collections.deque, list_non_mutating_methods),
820 817 collections.defaultdict,
821 818 *_list_methods(collections.defaultdict, dict_non_mutating_methods),
822 819 collections.OrderedDict,
823 820 *_list_methods(collections.OrderedDict, dict_non_mutating_methods),
824 821 collections.UserDict,
825 822 *_list_methods(collections.UserDict, dict_non_mutating_methods),
826 823 collections.UserList,
827 824 *_list_methods(collections.UserList, list_non_mutating_methods),
828 825 collections.UserString,
829 826 *_list_methods(collections.UserString, dir(str)),
830 827 collections.Counter,
831 828 *_list_methods(collections.Counter, dict_non_mutating_methods),
832 829 collections.Counter.elements,
833 830 collections.Counter.most_common,
834 831 }
835 832
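A quick sketch of what `_list_methods` contributes to the allow-list above: for each name it collects the unbound descriptor from the class, so membership checks compare against objects such as `dict.copy` rather than strings (editor's illustration):

non_mutating = ("copy", "keys", "values", "items")
subset = {getattr(dict, name) for name in non_mutating}
assert dict.copy in subset        # collected as non-mutating
assert dict.update not in subset  # mutating methods are deliberately absent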
836 833 BUILTIN_GETATTR: Set[MayHaveGetattr] = {
837 834 *BUILTIN_GETITEM,
838 835 set,
839 836 frozenset,
840 837 object,
841 838 type, # `type` handles a lot of generic cases, e.g. numbers as in `int.real`.
842 839 *NUMERICS,
843 840 dict_keys,
844 841 MethodDescriptorType,
845 842 ModuleType,
846 843 }
847 844
848 845
849 846 BUILTIN_OPERATIONS = {*BUILTIN_GETATTR}
850 847
851 848 EVALUATION_POLICIES = {
852 849 "minimal": EvaluationPolicy(
853 850 allow_builtins_access=True,
854 851 allow_locals_access=False,
855 852 allow_globals_access=False,
856 853 allow_item_access=False,
857 854 allow_attr_access=False,
858 855 allowed_calls=set(),
859 856 allow_any_calls=False,
860 857 allow_all_operations=False,
861 858 ),
862 859 "limited": SelectivePolicy(
863 860 allowed_getitem=BUILTIN_GETITEM,
864 861 allowed_getitem_external=SUPPORTED_EXTERNAL_GETITEM,
865 862 allowed_getattr=BUILTIN_GETATTR,
866 863 allowed_getattr_external={
867 864 # pandas Series/Frame implements custom `__getattr__`
868 865 ("pandas", "DataFrame"),
869 866 ("pandas", "Series"),
870 867 },
871 868 allowed_operations=BUILTIN_OPERATIONS,
872 869 allow_builtins_access=True,
873 870 allow_locals_access=True,
874 871 allow_globals_access=True,
875 872 allowed_calls=ALLOWED_CALLS,
876 873 ),
877 874 "unsafe": EvaluationPolicy(
878 875 allow_builtins_access=True,
879 876 allow_locals_access=True,
880 877 allow_globals_access=True,
881 878 allow_attr_access=True,
882 879 allow_item_access=True,
883 880 allow_any_calls=True,
884 881 allow_all_operations=True,
885 882 ),
886 883 }
887 884
888 885
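A rough illustration of how the policy tiers differ, using only attribute names visible in this file; whether `can_call(dict)` returns True under the "limited" policy is an assumption based on `dict` being part of ALLOWED_CALLS:

minimal = EVALUATION_POLICIES["minimal"]
limited = EVALUATION_POLICIES["limited"]

assert minimal.allow_builtins_access and not minimal.allow_locals_access
assert limited.allow_locals_access and limited.allow_globals_access
assert limited.can_call(dict)   # assumed: dict is in ALLOWED_CALLS
# The "unsafe" policy places no restrictions on attribute/item access or calls.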
889 886 __all__ = [
890 887 "guarded_eval",
891 888 "eval_node",
892 889 "GuardRejection",
893 890 "EvaluationContext",
894 891 "_unbind_method",
895 892 ]
@@ -1,3987 +1,3985
1 1 # -*- coding: utf-8 -*-
2 2 """Main IPython class."""
3 3
4 4 #-----------------------------------------------------------------------------
5 5 # Copyright (C) 2001 Janko Hauser <jhauser@zscout.de>
6 6 # Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
7 7 # Copyright (C) 2008-2011 The IPython Development Team
8 8 #
9 9 # Distributed under the terms of the BSD License. The full license is in
10 10 # the file COPYING, distributed as part of this software.
11 11 #-----------------------------------------------------------------------------
12 12
13 13
14 14 import abc
15 15 import ast
16 16 import atexit
17 17 import bdb
18 18 import builtins as builtin_mod
19 19 import functools
20 20 import inspect
21 21 import os
22 22 import re
23 23 import runpy
24 24 import shutil
25 25 import subprocess
26 26 import sys
27 27 import tempfile
28 28 import traceback
29 29 import types
30 30 import warnings
31 31 from ast import stmt
32 32 from io import open as io_open
33 33 from logging import error
34 34 from pathlib import Path
35 35 from typing import Callable
36 36 from typing import List as ListType, Dict as DictType, Any as AnyType
37 37 from typing import Optional, Sequence, Tuple
38 38 from warnings import warn
39 39
40 40 try:
41 41 from pickleshare import PickleShareDB
42 42 except ModuleNotFoundError:
43 43
44 44 class PickleShareDB: # type: ignore [no-redef]
45 45 _mock = True
46 46
47 47 def __init__(self, path):
48 48 pass
49 49
50 50 def get(self, key, default=None):
51 51 warn(
52 52 f"This is now an optional IPython functionality, using {key} requires you to install the `pickleshare` library.",
53 53 stacklevel=2,
54 54 )
55 55 return default
56 56
57 57 def __getitem__(self, key):
58 58 warn(
59 59 f"This is now an optional IPython functionality, using {key} requires you to install the `pickleshare` library.",
60 60 stacklevel=2,
61 61 )
62 62 return None
63 63
64 64 def __setitem__(self, key, value):
65 65 warn(
66 66 f"This is now an optional IPython functionality, setting {key} requires you to install the `pickleshare` library.",
67 67 stacklevel=2,
68 68 )
69 69
70 70 def __delitem__(self, key):
71 71 warn(
72 72 f"This is now an optional IPython functionality, deleting {key} requires you to install the `pickleshare` library.",
73 73 stacklevel=2,
74 74 )
75 75
76 76
77 77 from tempfile import TemporaryDirectory
78 78 from traitlets import (
79 79 Any,
80 80 Bool,
81 81 CaselessStrEnum,
82 82 Dict,
83 83 Enum,
84 84 Instance,
85 85 Integer,
86 86 List,
87 87 Type,
88 88 Unicode,
89 89 default,
90 90 observe,
91 91 validate,
92 92 )
93 93 from traitlets.config.configurable import SingletonConfigurable
94 94 from traitlets.utils.importstring import import_item
95 95
96 96 import IPython.core.hooks
97 97 from IPython.core import magic, oinspect, page, prefilter, ultratb
98 98 from IPython.core.alias import Alias, AliasManager
99 99 from IPython.core.autocall import ExitAutocall
100 100 from IPython.core.builtin_trap import BuiltinTrap
101 101 from IPython.core.compilerop import CachingCompiler
102 102 from IPython.core.debugger import InterruptiblePdb
103 103 from IPython.core.display_trap import DisplayTrap
104 104 from IPython.core.displayhook import DisplayHook
105 105 from IPython.core.displaypub import DisplayPublisher
106 106 from IPython.core.error import InputRejected, UsageError
107 107 from IPython.core.events import EventManager, available_events
108 108 from IPython.core.extensions import ExtensionManager
109 109 from IPython.core.formatters import DisplayFormatter
110 110 from IPython.core.history import HistoryManager
111 111 from IPython.core.inputtransformer2 import ESC_MAGIC, ESC_MAGIC2
112 112 from IPython.core.logger import Logger
113 113 from IPython.core.macro import Macro
114 114 from IPython.core.payload import PayloadManager
115 115 from IPython.core.prefilter import PrefilterManager
116 116 from IPython.core.profiledir import ProfileDir
117 117 from IPython.core.usage import default_banner
118 118 from IPython.display import display
119 119 from IPython.paths import get_ipython_dir
120 120 from IPython.testing.skipdoctest import skip_doctest
121 121 from IPython.utils import PyColorize, io, openpy, py3compat
122 122 from IPython.utils.decorators import undoc
123 123 from IPython.utils.io import ask_yes_no
124 124 from IPython.utils.ipstruct import Struct
125 125 from IPython.utils.path import ensure_dir_exists, get_home_dir, get_py_filename
126 126 from IPython.utils.process import getoutput, system
127 127 from IPython.utils.strdispatch import StrDispatch
128 128 from IPython.utils.syspathcontext import prepended_to_syspath
129 129 from IPython.utils.text import DollarFormatter, LSString, SList, format_screen
130 130 from IPython.core.oinspect import OInfo
131 131
132 132
133 133 sphinxify: Optional[Callable]
134 134
135 135 try:
136 136 import docrepr.sphinxify as sphx
137 137
138 138 def sphinxify(oinfo):
139 139 wrapped_docstring = sphx.wrap_main_docstring(oinfo)
140 140
141 141 def sphinxify_docstring(docstring):
142 142 with TemporaryDirectory() as dirname:
143 143 return {
144 144 "text/html": sphx.sphinxify(wrapped_docstring, dirname),
145 145 "text/plain": docstring,
146 146 }
147 147
148 148 return sphinxify_docstring
149 149 except ImportError:
150 150 sphinxify = None
151 151
152 if sys.version_info[:2] < (3, 11):
153 from exceptiongroup import BaseExceptionGroup
154 152
155 153 class ProvisionalWarning(DeprecationWarning):
156 154 """
157 155 Warning class for unstable features
158 156 """
159 157 pass
160 158
161 159 from ast import Module
162 160
163 161 _assign_nodes = (ast.AugAssign, ast.AnnAssign, ast.Assign)
164 162 _single_targets_nodes = (ast.AugAssign, ast.AnnAssign)
165 163
166 164 #-----------------------------------------------------------------------------
167 165 # Await Helpers
168 166 #-----------------------------------------------------------------------------
169 167
170 168 # we still need to run things using the asyncio eventloop, but there is no
171 169 # async integration
172 170 from .async_helpers import (
173 171 _asyncio_runner,
174 172 _curio_runner,
175 173 _pseudo_sync_runner,
176 174 _should_be_async,
177 175 _trio_runner,
178 176 )
179 177
180 178 #-----------------------------------------------------------------------------
181 179 # Globals
182 180 #-----------------------------------------------------------------------------
183 181
184 182 # compiled regexps for autoindent management
185 183 dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
186 184
187 185 #-----------------------------------------------------------------------------
188 186 # Utilities
189 187 #-----------------------------------------------------------------------------
190 188
191 189
192 190 def is_integer_string(s: str):
193 191 """
194 192 Variant of "str.isnumeric()" that allows negative values and other ints.
195 193 """
196 194 try:
197 195 int(s)
198 196 return True
199 197 except ValueError:
200 198 return False
201 199 raise ValueError("Unexpected error")
202 200
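A couple of cases contrasting this with `str.isnumeric()` (editor's illustration, using the function defined just above):

assert is_integer_string("-3") is True     # "-3".isnumeric() would be False
assert is_integer_string("007") is True    # leading zeros still parse via int()
assert is_integer_string("3.5") is False   # floats are rejected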
203 201
204 202 @undoc
205 203 def softspace(file, newvalue):
206 204 """Copied from code.py, to remove the dependency"""
207 205
208 206 oldvalue = 0
209 207 try:
210 208 oldvalue = file.softspace
211 209 except AttributeError:
212 210 pass
213 211 try:
214 212 file.softspace = newvalue
215 213 except (AttributeError, TypeError):
216 214 # "attribute-less object" or "read-only attributes"
217 215 pass
218 216 return oldvalue
219 217
220 218 @undoc
221 219 def no_op(*a, **kw):
222 220 pass
223 221
224 222
225 223 class SpaceInInput(Exception): pass
226 224
227 225
228 226 class SeparateUnicode(Unicode):
229 227 r"""A Unicode subclass to validate separate_in, separate_out, etc.
230 228
231 229 This is a Unicode based trait that converts '0'->'' and ``'\\n'->'\n'``.
232 230 """
233 231
234 232 def validate(self, obj, value):
235 233 if value == '0': value = ''
236 234 value = value.replace('\\n','\n')
237 235 return super(SeparateUnicode, self).validate(obj, value)
238 236
239 237
240 238 @undoc
241 239 class DummyMod(object):
242 240 """A dummy module used for IPython's interactive module when
243 241 a namespace must be assigned to the module's __dict__."""
244 242 __spec__ = None
245 243
246 244
247 245 class ExecutionInfo(object):
248 246 """The arguments used for a call to :meth:`InteractiveShell.run_cell`
249 247
250 248 Stores information about what is going to happen.
251 249 """
252 250 raw_cell = None
253 251 store_history = False
254 252 silent = False
255 253 shell_futures = True
256 254 cell_id = None
257 255
258 256 def __init__(self, raw_cell, store_history, silent, shell_futures, cell_id):
259 257 self.raw_cell = raw_cell
260 258 self.store_history = store_history
261 259 self.silent = silent
262 260 self.shell_futures = shell_futures
263 261 self.cell_id = cell_id
264 262
265 263 def __repr__(self):
266 264 name = self.__class__.__qualname__
267 265 raw_cell = (
268 266 (self.raw_cell[:50] + "..") if len(self.raw_cell) > 50 else self.raw_cell
269 267 )
270 268 return (
271 269 '<%s object at %x, raw_cell="%s" store_history=%s silent=%s shell_futures=%s cell_id=%s>'
272 270 % (
273 271 name,
274 272 id(self),
275 273 raw_cell,
276 274 self.store_history,
277 275 self.silent,
278 276 self.shell_futures,
279 277 self.cell_id,
280 278 )
281 279 )
282 280
283 281
284 282 class ExecutionResult:
285 283 """The result of a call to :meth:`InteractiveShell.run_cell`
286 284
287 285 Stores information about what took place.
288 286 """
289 287
290 288 execution_count: Optional[int] = None
291 289 error_before_exec: Optional[bool] = None
292 290 error_in_exec: Optional[BaseException] = None
293 291 info = None
294 292 result = None
295 293
296 294 def __init__(self, info):
297 295 self.info = info
298 296
299 297 @property
300 298 def success(self):
301 299 return (self.error_before_exec is None) and (self.error_in_exec is None)
302 300
303 301 def raise_error(self):
304 302 """Reraises the error if `success` is `False`; otherwise does nothing."""
305 303 if self.error_before_exec is not None:
306 304 raise self.error_before_exec
307 305 if self.error_in_exec is not None:
308 306 raise self.error_in_exec
309 307
310 308 def __repr__(self):
311 309 name = self.__class__.__qualname__
312 310 return '<%s object at %x, execution_count=%s error_before_exec=%s error_in_exec=%s info=%s result=%s>' %\
313 311 (name, id(self), self.execution_count, self.error_before_exec, self.error_in_exec, repr(self.info), repr(self.result))
314 312
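A hedged sketch of how an ExecutionResult is typically inspected after `run_cell` (the traceback printed by the failing cell is omitted):

shell = InteractiveShell.instance()       # the class defined later in this file
result = shell.run_cell("1 / 0")          # the ZeroDivisionError is captured, not raised
assert result.success is False            # error_in_exec is set
assert result.error_before_exec is None   # the cell itself compiled fine
# result.raise_error() would re-raise the stored exception at this point.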
315 313 @functools.wraps(io_open)
316 314 def _modified_open(file, *args, **kwargs):
317 315 if file in {0, 1, 2}:
318 316 raise ValueError(
319 317 f"IPython won't let you open fd={file} by default "
320 318 "as it is likely to crash IPython. If you know what you are doing, "
321 319 "you can use builtins' open."
322 320 )
323 321
324 322 return io_open(file, *args, **kwargs)
325 323
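The guard above in action (editor's illustration):

try:
    _modified_open(0)   # fd 0 is stdin; reopening it tends to wedge the shell
except ValueError:
    pass                # refused; io.open / builtins.open remain available if truly needed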
326 324 class InteractiveShell(SingletonConfigurable):
327 325 """An enhanced, interactive shell for Python."""
328 326
329 327 _instance = None
330 328
331 329 ast_transformers: List[ast.NodeTransformer] = List(
332 330 [],
333 331 help="""
334 332 A list of ast.NodeTransformer subclass instances, which will be applied
335 333 to user input before code is run.
336 334 """,
337 335 ).tag(config=True)
338 336
339 337 autocall = Enum((0,1,2), default_value=0, help=
340 338 """
341 339 Make IPython automatically call any callable object even if you didn't
342 340 type explicit parentheses. For example, 'str 43' becomes 'str(43)'
343 341 automatically. The value can be '0' to disable the feature, '1' for
344 342 'smart' autocall, where it is not applied if there are no more
345 343 arguments on the line, and '2' for 'full' autocall, where all callable
346 344 objects are automatically called (even if no arguments are present).
347 345 """
348 346 ).tag(config=True)
349 347
350 348 autoindent = Bool(True, help=
351 349 """
352 350 Autoindent IPython code entered interactively.
353 351 """
354 352 ).tag(config=True)
355 353
356 354 autoawait = Bool(True, help=
357 355 """
358 356 Automatically run await statements in the top-level REPL.
359 357 """
360 358 ).tag(config=True)
361 359
362 360 loop_runner_map ={
363 361 'asyncio':(_asyncio_runner, True),
364 362 'curio':(_curio_runner, True),
365 363 'trio':(_trio_runner, True),
366 364 'sync': (_pseudo_sync_runner, False)
367 365 }
368 366
369 367 loop_runner = Any(default_value="IPython.core.interactiveshell._asyncio_runner",
370 368 allow_none=True,
371 369 help="""Select the loop runner that will be used to execute top-level asynchronous code"""
372 370 ).tag(config=True)
373 371
374 372 @default('loop_runner')
375 373 def _default_loop_runner(self):
376 374 return import_item("IPython.core.interactiveshell._asyncio_runner")
377 375
378 376 @validate('loop_runner')
379 377 def _import_runner(self, proposal):
380 378 if isinstance(proposal.value, str):
381 379 if proposal.value in self.loop_runner_map:
382 380 runner, autoawait = self.loop_runner_map[proposal.value]
383 381 self.autoawait = autoawait
384 382 return runner
385 383 runner = import_item(proposal.value)
386 384 if not callable(runner):
387 385 raise ValueError('loop_runner must be callable')
388 386 return runner
389 387 if not callable(proposal.value):
390 388 raise ValueError('loop_runner must be callable')
391 389 return proposal.value
392 390
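A configuration sketch for the validator above: a known short name resolves through `loop_runner_map` (and flips `autoawait`), while any other string is imported as a dotted path. As it would appear in an `ipython_config.py`, where `c` is the config object:

c.InteractiveShell.loop_runner = "trio"   # short name: also enables autoawait
# or any importable callable, by dotted path:
# c.InteractiveShell.loop_runner = "IPython.core.interactiveshell._asyncio_runner"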
393 391 automagic = Bool(True, help=
394 392 """
395 393 Enable magic commands to be called without the leading %.
396 394 """
397 395 ).tag(config=True)
398 396
399 397 banner1 = Unicode(default_banner,
400 398 help="""The part of the banner to be printed before the profile"""
401 399 ).tag(config=True)
402 400 banner2 = Unicode('',
403 401 help="""The part of the banner to be printed after the profile"""
404 402 ).tag(config=True)
405 403
406 404 cache_size = Integer(1000, help=
407 405 """
408 406 Set the size of the output cache. The default is 1000; you can
409 407 change it permanently in your config file. Setting it to 0 completely
410 408 disables the caching system, and the minimum value accepted is 3 (if
411 409 you provide a value less than 3, it is reset to 0 and a warning is
412 410 issued). This limit exists because otherwise you'll spend more
413 411 time re-flushing a too-small cache than working.
414 412 """
415 413 ).tag(config=True)
416 414 color_info = Bool(True, help=
417 415 """
418 416 Use colors for displaying information about objects. Because this
419 417 information is passed through a pager (like 'less'), and some pagers
420 418 get confused with color codes, this capability can be turned off.
421 419 """
422 420 ).tag(config=True)
423 421 colors = CaselessStrEnum(('Neutral', 'NoColor','LightBG','Linux'),
424 422 default_value='Neutral',
425 423 help="Set the color scheme (NoColor, Neutral, Linux, or LightBG)."
426 424 ).tag(config=True)
427 425 debug = Bool(False).tag(config=True)
428 426 disable_failing_post_execute = Bool(False,
429 427 help="Don't call post-execute functions that have failed in the past."
430 428 ).tag(config=True)
431 429 display_formatter = Instance(DisplayFormatter, allow_none=True)
432 430 displayhook_class = Type(DisplayHook)
433 431 display_pub_class = Type(DisplayPublisher)
434 432 compiler_class = Type(CachingCompiler)
435 433 inspector_class = Type(
436 434 oinspect.Inspector, help="Class to use to instantiate the shell inspector"
437 435 ).tag(config=True)
438 436
439 437 sphinxify_docstring = Bool(False, help=
440 438 """
441 439 Enables rich html representation of docstrings. (This requires the
442 440 docrepr module).
443 441 """).tag(config=True)
444 442
445 443 @observe("sphinxify_docstring")
446 444 def _sphinxify_docstring_changed(self, change):
447 445 if change['new']:
448 446 warn("`sphinxify_docstring` is provisional since IPython 5.0 and might change in future versions." , ProvisionalWarning)
449 447
450 448 enable_html_pager = Bool(False, help=
451 449 """
452 450 (Provisional API) enables html representation in mime bundles sent
453 451 to pagers.
454 452 """).tag(config=True)
455 453
456 454 @observe("enable_html_pager")
457 455 def _enable_html_pager_changed(self, change):
458 456 if change['new']:
459 457 warn("`enable_html_pager` is provisional since IPython 5.0 and might change in future versions.", ProvisionalWarning)
460 458
461 459 data_pub_class = None
462 460
463 461 exit_now = Bool(False)
464 462 exiter = Instance(ExitAutocall)
465 463 @default('exiter')
466 464 def _exiter_default(self):
467 465 return ExitAutocall(self)
468 466 # Monotonically increasing execution counter
469 467 execution_count = Integer(1)
470 468 filename = Unicode("<ipython console>")
471 469 ipython_dir= Unicode('').tag(config=True) # Set to get_ipython_dir() in __init__
472 470
473 471 # Used to transform cells before running them, and check whether code is complete
474 472 input_transformer_manager = Instance('IPython.core.inputtransformer2.TransformerManager',
475 473 ())
476 474
477 475 @property
478 476 def input_transformers_cleanup(self):
479 477 return self.input_transformer_manager.cleanup_transforms
480 478
481 479 input_transformers_post: List = List(
482 480 [],
483 481 help="A list of string input transformers, to be applied after IPython's "
484 482 "own input transformations."
485 483 )
486 484
487 485 @property
488 486 def input_splitter(self):
489 487 """Make this available for backward compatibility (pre-7.0 release) with existing code.
490 488
491 489 For example, ipykernel currently uses
492 490 `shell.input_splitter.check_complete`
493 491 """
494 492 from warnings import warn
495 493 warn("`input_splitter` is deprecated since IPython 7.0, prefer `input_transformer_manager`.",
496 494 DeprecationWarning, stacklevel=2
497 495 )
498 496 return self.input_transformer_manager
499 497
500 498 logstart = Bool(False, help=
501 499 """
502 500 Start logging to the default log file in overwrite mode.
503 501 Use `logappend` to specify a log file to **append** logs to.
504 502 """
505 503 ).tag(config=True)
506 504 logfile = Unicode('', help=
507 505 """
508 506 The name of the logfile to use.
509 507 """
510 508 ).tag(config=True)
511 509 logappend = Unicode('', help=
512 510 """
513 511 Start logging to the given file in append mode.
514 512 Use `logfile` to specify a log file to **overwrite** logs to.
515 513 """
516 514 ).tag(config=True)
517 515 object_info_string_level = Enum((0,1,2), default_value=0,
518 516 ).tag(config=True)
519 517 pdb = Bool(False, help=
520 518 """
521 519 Automatically call the pdb debugger after every exception.
522 520 """
523 521 ).tag(config=True)
524 522 display_page = Bool(False,
525 523 help="""If True, anything that would be passed to the pager
526 524 will be displayed as regular output instead."""
527 525 ).tag(config=True)
528 526
529 527
530 528 show_rewritten_input = Bool(True,
531 529 help="Show rewritten input, e.g. for autocall."
532 530 ).tag(config=True)
533 531
534 532 quiet = Bool(False).tag(config=True)
535 533
536 534 history_length = Integer(10000,
537 535 help='Total length of command history'
538 536 ).tag(config=True)
539 537
540 538 history_load_length = Integer(1000, help=
541 539 """
542 540 The number of saved history entries to be loaded
543 541 into the history buffer at startup.
544 542 """
545 543 ).tag(config=True)
546 544
547 545 ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none', 'last_expr_or_assign'],
548 546 default_value='last_expr',
549 547 help="""
550 548 One of 'all', 'last', 'last_expr', 'none', or 'last_expr_or_assign', specifying
551 549 which nodes should be run interactively (displaying output from expressions).
552 550 """
553 551 ).tag(config=True)
554 552
555 553 warn_venv = Bool(
556 554 True,
557 555 help="Warn if running in a virtual environment with no IPython installed (so IPython from the global environment is used).",
558 556 ).tag(config=True)
559 557
560 558 # TODO: this part of prompt management should be moved to the frontends.
561 559 # Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
562 560 separate_in = SeparateUnicode('\n').tag(config=True)
563 561 separate_out = SeparateUnicode('').tag(config=True)
564 562 separate_out2 = SeparateUnicode('').tag(config=True)
565 563 wildcards_case_sensitive = Bool(True).tag(config=True)
566 564 xmode = CaselessStrEnum(('Context', 'Plain', 'Verbose', 'Minimal'),
567 565 default_value='Context',
568 566 help="Switch modes for the IPython exception handlers."
569 567 ).tag(config=True)
570 568
571 569 # Subcomponents of InteractiveShell
572 570 alias_manager = Instance("IPython.core.alias.AliasManager", allow_none=True)
573 571 prefilter_manager = Instance(
574 572 "IPython.core.prefilter.PrefilterManager", allow_none=True
575 573 )
576 574 builtin_trap = Instance("IPython.core.builtin_trap.BuiltinTrap")
577 575 display_trap = Instance("IPython.core.display_trap.DisplayTrap")
578 576 extension_manager = Instance(
579 577 "IPython.core.extensions.ExtensionManager", allow_none=True
580 578 )
581 579 payload_manager = Instance("IPython.core.payload.PayloadManager", allow_none=True)
582 580 history_manager = Instance(
583 581 "IPython.core.history.HistoryAccessorBase", allow_none=True
584 582 )
585 583 magics_manager = Instance("IPython.core.magic.MagicsManager")
586 584
587 585 profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
588 586 @property
589 587 def profile(self):
590 588 if self.profile_dir is not None:
591 589 name = os.path.basename(self.profile_dir.location)
592 590 return name.replace('profile_','')
593 591
594 592
595 593 # Private interface
596 594 _post_execute = Dict()
597 595
598 596 # Tracks any GUI loop loaded for pylab
599 597 pylab_gui_select = None
600 598
601 599 last_execution_succeeded = Bool(True, help='Whether the last executed command succeeded')
602 600
603 601 last_execution_result = Instance('IPython.core.interactiveshell.ExecutionResult', help='Result of executing the last command', allow_none=True)
604 602
605 603 def __init__(self, ipython_dir=None, profile_dir=None,
606 604 user_module=None, user_ns=None,
607 605 custom_exceptions=((), None), **kwargs):
608 606 # This is where traits with a config_key argument are updated
609 607 # from the values on config.
610 608 super(InteractiveShell, self).__init__(**kwargs)
611 609 if 'PromptManager' in self.config:
612 610 warn('As of IPython 5.0 `PromptManager` config will have no effect'
613 611 ' and has been replaced by TerminalInteractiveShell.prompts_class')
614 612 self.configurables = [self]
615 613
616 614 # These are relatively independent and stateless
617 615 self.init_ipython_dir(ipython_dir)
618 616 self.init_profile_dir(profile_dir)
619 617 self.init_instance_attrs()
620 618 self.init_environment()
621 619
622 620 # Check if we're in a virtualenv, and set up sys.path.
623 621 self.init_virtualenv()
624 622
625 623 # Create namespaces (user_ns, user_global_ns, etc.)
626 624 self.init_create_namespaces(user_module, user_ns)
627 625 # This has to be done after init_create_namespaces because it uses
628 626 # something in self.user_ns, but before init_sys_modules, which
629 627 # is the first thing to modify sys.
630 628 # TODO: When we override sys.stdout and sys.stderr before this class
631 629 # is created, we are saving the overridden ones here. Not sure if this
632 630 # is what we want to do.
633 631 self.save_sys_module_state()
634 632 self.init_sys_modules()
635 633
636 634 # While we're trying to have each part of the code directly access what
637 635 # it needs without keeping redundant references to objects, we have too
638 636 # much legacy code that expects ip.db to exist.
639 637 self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
640 638
641 639 self.init_history()
642 640 self.init_encoding()
643 641 self.init_prefilter()
644 642
645 643 self.init_syntax_highlighting()
646 644 self.init_hooks()
647 645 self.init_events()
648 646 self.init_pushd_popd_magic()
649 647 self.init_user_ns()
650 648 self.init_logger()
651 649 self.init_builtins()
652 650
653 651 # The following was in post_config_initialization
654 652 self.init_inspector()
655 653 self.raw_input_original = input
656 654 self.init_completer()
657 655 # TODO: init_io() needs to happen before init_traceback handlers
658 656 # because the traceback handlers hardcode the stdout/stderr streams.
659 657 # This logic is in debugger.Pdb and should eventually be changed.
660 658 self.init_io()
661 659 self.init_traceback_handlers(custom_exceptions)
662 660 self.init_prompts()
663 661 self.init_display_formatter()
664 662 self.init_display_pub()
665 663 self.init_data_pub()
666 664 self.init_displayhook()
667 665 self.init_magics()
668 666 self.init_alias()
669 667 self.init_logstart()
670 668 self.init_pdb()
671 669 self.init_extension_manager()
672 670 self.init_payload()
673 671 self.events.trigger('shell_initialized', self)
674 672 atexit.register(self.atexit_operations)
675 673
676 674 # The trio runner is used for running Trio in the foreground thread. It
677 675 # is different from `_trio_runner(async_fn)` in `async_helpers.py`
678 676 # which calls `trio.run()` for every cell. This runner runs all cells
679 677 # inside a single Trio event loop. If used, it is set from
680 678 # `ipykernel.kernelapp`.
681 679 self.trio_runner = None
682 680
683 681 def get_ipython(self):
684 682 """Return the currently running IPython instance."""
685 683 return self
686 684
687 685 #-------------------------------------------------------------------------
688 686 # Trait changed handlers
689 687 #-------------------------------------------------------------------------
690 688 @observe('ipython_dir')
691 689 def _ipython_dir_changed(self, change):
692 690 ensure_dir_exists(change['new'])
693 691
694 692 def set_autoindent(self,value=None):
695 693 """Set the autoindent flag.
696 694
697 695 If called with no arguments, it acts as a toggle."""
698 696 if value is None:
699 697 self.autoindent = not self.autoindent
700 698 else:
701 699 self.autoindent = value
702 700
703 701 def set_trio_runner(self, tr):
704 702 self.trio_runner = tr
705 703
706 704 #-------------------------------------------------------------------------
707 705 # init_* methods called by __init__
708 706 #-------------------------------------------------------------------------
709 707
710 708 def init_ipython_dir(self, ipython_dir):
711 709 if ipython_dir is not None:
712 710 self.ipython_dir = ipython_dir
713 711 return
714 712
715 713 self.ipython_dir = get_ipython_dir()
716 714
717 715 def init_profile_dir(self, profile_dir):
718 716 if profile_dir is not None:
719 717 self.profile_dir = profile_dir
720 718 return
721 719 self.profile_dir = ProfileDir.create_profile_dir_by_name(
722 720 self.ipython_dir, "default"
723 721 )
724 722
725 723 def init_instance_attrs(self):
726 724 self.more = False
727 725
728 726 # command compiler
729 727 self.compile = self.compiler_class()
730 728
731 729 # Make an empty namespace, which extension writers can rely on both
732 730 # existing and NEVER being used by ipython itself. This gives them a
733 731 # convenient location for storing additional information and state
734 732 # their extensions may require, without fear of collisions with other
735 733 # ipython names that may develop later.
736 734 self.meta = Struct()
737 735
738 736 # Temporary files used for various purposes. Deleted at exit.
739 737 # The files here are stored with Path from Pathlib
740 738 self.tempfiles = []
741 739 self.tempdirs = []
742 740
743 741 # keep track of where we started running (mainly for crash post-mortem)
744 742 # This is not being used anywhere currently.
745 743 self.starting_dir = os.getcwd()
746 744
747 745 # Indentation management
748 746 self.indent_current_nsp = 0
749 747
750 748 # Dict to track post-execution functions that have been registered
751 749 self._post_execute = {}
752 750
753 751 def init_environment(self):
754 752 """Any changes we need to make to the user's environment."""
755 753 pass
756 754
757 755 def init_encoding(self):
758 756 # Get system encoding at startup time. Certain terminals (like Emacs
759 757 # under Win32) have it set to None, and we need a known valid
760 758 # encoding to use in the raw_input() method.
761 759 try:
762 760 self.stdin_encoding = sys.stdin.encoding or 'ascii'
763 761 except AttributeError:
764 762 self.stdin_encoding = 'ascii'
765 763
766 764
767 765 @observe('colors')
768 766 def init_syntax_highlighting(self, changes=None):
769 767 # Python source parser/formatter for syntax highlighting
770 768 pyformat = PyColorize.Parser(style=self.colors, parent=self).format
771 769 self.pycolorize = lambda src: pyformat(src,'str')
772 770
773 771 def refresh_style(self):
774 772 # No-op here, used in subclass
775 773 pass
776 774
777 775 def init_pushd_popd_magic(self):
778 776 # for pushd/popd management
779 777 self.home_dir = get_home_dir()
780 778
781 779 self.dir_stack = []
782 780
783 781 def init_logger(self):
784 782 self.logger = Logger(self.home_dir, logfname='ipython_log.py',
785 783 logmode='rotate')
786 784
787 785 def init_logstart(self):
788 786 """Initialize logging in case it was requested at the command line.
789 787 """
790 788 if self.logappend:
791 789 self.magic('logstart %s append' % self.logappend)
792 790 elif self.logfile:
793 791 self.magic('logstart %s' % self.logfile)
794 792 elif self.logstart:
795 793 self.magic('logstart')
796 794
797 795
798 796 def init_builtins(self):
799 797 # A single, static flag that we set to True. Its presence indicates
800 798 # that an IPython shell has been created, and we make no attempt to
801 799 # remove it on exit or to represent the existence of more than one
802 800 # IPython at a time.
803 801 builtin_mod.__dict__['__IPYTHON__'] = True
804 802 builtin_mod.__dict__['display'] = display
805 803
806 804 self.builtin_trap = BuiltinTrap(shell=self)
807 805
808 806 @observe('colors')
809 807 def init_inspector(self, changes=None):
810 808 # Object inspector
811 809 self.inspector = self.inspector_class(
812 810 oinspect.InspectColors,
813 811 PyColorize.ANSICodeColors,
814 812 self.colors,
815 813 self.object_info_string_level,
816 814 )
817 815
818 816 def init_io(self):
819 817 # implemented in subclasses, TerminalInteractiveShell does call
820 818 # colorama.init().
821 819 pass
822 820
823 821 def init_prompts(self):
824 822 # Set system prompts, so that scripts can decide if they are running
825 823 # interactively.
826 824 sys.ps1 = 'In : '
827 825 sys.ps2 = '...: '
828 826 sys.ps3 = 'Out: '
829 827
830 828 def init_display_formatter(self):
831 829 self.display_formatter = DisplayFormatter(parent=self)
832 830 self.configurables.append(self.display_formatter)
833 831
834 832 def init_display_pub(self):
835 833 self.display_pub = self.display_pub_class(parent=self, shell=self)
836 834 self.configurables.append(self.display_pub)
837 835
838 836 def init_data_pub(self):
839 837 if not self.data_pub_class:
840 838 self.data_pub = None
841 839 return
842 840 self.data_pub = self.data_pub_class(parent=self)
843 841 self.configurables.append(self.data_pub)
844 842
845 843 def init_displayhook(self):
846 844 # Initialize displayhook, set in/out prompts and printing system
847 845 self.displayhook = self.displayhook_class(
848 846 parent=self,
849 847 shell=self,
850 848 cache_size=self.cache_size,
851 849 )
852 850 self.configurables.append(self.displayhook)
853 851 # This is a context manager that installs/removes the displayhook at
854 852 # the appropriate time.
855 853 self.display_trap = DisplayTrap(hook=self.displayhook)
856 854
857 855 @staticmethod
858 856 def get_path_links(p: Path):
859 857 """Gets path links including all symlinks
860 858
861 859 Examples
862 860 --------
863 861 In [1]: from IPython.core.interactiveshell import InteractiveShell
864 862
865 863 In [2]: import sys, pathlib
866 864
867 865 In [3]: paths = InteractiveShell.get_path_links(pathlib.Path(sys.executable))
868 866
869 867 In [4]: len(paths) == len(set(paths))
870 868 Out[4]: True
871 869
872 870 In [5]: bool(paths)
873 871 Out[5]: True
874 872 """
875 873 paths = [p]
876 874 while p.is_symlink():
877 875 new_path = Path(os.readlink(p))
878 876 if not new_path.is_absolute():
879 877 new_path = p.parent / new_path
880 878 p = new_path
881 879 paths.append(p)
882 880 return paths
883 881
884 882 def init_virtualenv(self):
885 883 """Add the current virtualenv to sys.path so the user can import modules from it.
886 884 This isn't perfect: it doesn't use the Python interpreter with which the
887 885 virtualenv was built, and it ignores the --no-site-packages option. A
888 886 warning will appear suggesting the user installs IPython in the
889 887 virtualenv, but for many cases, it probably works well enough.
890 888
891 889 Adapted from code snippets online.
892 890
893 891 http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
894 892 """
895 893 if 'VIRTUAL_ENV' not in os.environ:
896 894 # Not in a virtualenv
897 895 return
898 896 elif os.environ["VIRTUAL_ENV"] == "":
899 897 warn("Virtual env path set to '', please check if this is intended.")
900 898 return
901 899
902 900 p = Path(sys.executable)
903 901 p_venv = Path(os.environ["VIRTUAL_ENV"])
904 902
905 903 # fallback venv detection:
906 904 # stdlib venv may symlink sys.executable, so we can't use realpath.
907 905 # but others can symlink *to* the venv Python, so we can't just use sys.executable.
908 906 # So we just check every item in the symlink tree (generally <= 3)
909 907 paths = self.get_path_links(p)
910 908
911 909 # In Cygwin paths like "c:\..." and '\cygdrive\c\...' are possible
912 910 if len(p_venv.parts) > 2 and p_venv.parts[1] == "cygdrive":
913 911 drive_name = p_venv.parts[2]
914 912 p_venv = (drive_name + ":/") / Path(*p_venv.parts[3:])
915 913
916 914 if any(p_venv == p.parents[1] for p in paths):
917 915 # Our exe is inside or has access to the virtualenv, don't need to do anything.
918 916 return
919 917
920 918 if sys.platform == "win32":
921 919 virtual_env = str(Path(os.environ["VIRTUAL_ENV"], "Lib", "site-packages"))
922 920 else:
923 921 virtual_env_path = Path(
924 922 os.environ["VIRTUAL_ENV"], "lib", "python{}.{}", "site-packages"
925 923 )
926 924 p_ver = sys.version_info[:2]
927 925
928 926 # Predict version from py[thon]-x.x in the $VIRTUAL_ENV
929 927 re_m = re.search(r"\bpy(?:thon)?([23])\.(\d+)\b", os.environ["VIRTUAL_ENV"])
930 928 if re_m:
931 929 predicted_path = Path(str(virtual_env_path).format(*re_m.groups()))
932 930 if predicted_path.exists():
933 931 p_ver = re_m.groups()
934 932
935 933 virtual_env = str(virtual_env_path).format(*p_ver)
936 934 if self.warn_venv:
937 935 warn(
938 936 "Attempting to work in a virtualenv. If you encounter problems, "
939 937 "please install IPython inside the virtualenv."
940 938 )
941 939 import site
942 940 sys.path.insert(0, virtual_env)
943 941 site.addsitedir(virtual_env)
944 942
945 943 #-------------------------------------------------------------------------
946 944 # Things related to injections into the sys module
947 945 #-------------------------------------------------------------------------
948 946
949 947 def save_sys_module_state(self):
950 948 """Save the state of hooks in the sys module.
951 949
952 950 This has to be called after self.user_module is created.
953 951 """
954 952 self._orig_sys_module_state = {'stdin': sys.stdin,
955 953 'stdout': sys.stdout,
956 954 'stderr': sys.stderr,
957 955 'excepthook': sys.excepthook}
958 956 self._orig_sys_modules_main_name = self.user_module.__name__
959 957 self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
960 958
961 959 def restore_sys_module_state(self):
962 960 """Restore the state of the sys module."""
963 961 try:
964 962 for k, v in self._orig_sys_module_state.items():
965 963 setattr(sys, k, v)
966 964 except AttributeError:
967 965 pass
968 966 # Reset what was done in self.init_sys_modules
969 967 if self._orig_sys_modules_main_mod is not None:
970 968 sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
971 969
972 970 #-------------------------------------------------------------------------
973 971 # Things related to the banner
974 972 #-------------------------------------------------------------------------
975 973
976 974 @property
977 975 def banner(self):
978 976 banner = self.banner1
979 977 if self.profile and self.profile != 'default':
980 978 banner += '\nIPython profile: %s\n' % self.profile
981 979 if self.banner2:
982 980 banner += '\n' + self.banner2
983 981 return banner
984 982
985 983 def show_banner(self, banner=None):
986 984 if banner is None:
987 985 banner = self.banner
988 986 sys.stdout.write(banner)
989 987
990 988 #-------------------------------------------------------------------------
991 989 # Things related to hooks
992 990 #-------------------------------------------------------------------------
993 991
994 992 def init_hooks(self):
995 993 # hooks holds pointers used for user-side customizations
996 994 self.hooks = Struct()
997 995
998 996 self.strdispatchers = {}
999 997
1000 998 # Set all default hooks, defined in the IPython.hooks module.
1001 999 hooks = IPython.core.hooks
1002 1000 for hook_name in hooks.__all__:
1003 1001 # default hooks have priority 100, i.e. low; user hooks should have
1004 1002 # 0-100 priority
1005 1003 self.set_hook(hook_name, getattr(hooks, hook_name), 100)
1006 1004
1007 1005 if self.display_page:
1008 1006 self.set_hook('show_in_pager', page.as_hook(page.display_page), 90)
1009 1007
1010 1008 def set_hook(self, name, hook, priority=50, str_key=None, re_key=None):
1011 1009 """set_hook(name,hook) -> sets an internal IPython hook.
1012 1010
1013 1011 IPython exposes some of its internal API as user-modifiable hooks. By
1014 1012 adding your function to one of these hooks, you can modify IPython's
1015 1013 behavior to call your own routines at runtime.
1016 1014
1017 1015 # At some point in the future, this should validate the hook before it
1018 1016 # accepts it. Probably at least check that the hook takes the number
1019 1017 # of args it's supposed to.
1020 1018
1021 1019 f = types.MethodType(hook,self)
1022 1020
1023 1021 # check if the hook is for strdispatcher first
1024 1022 if str_key is not None:
1025 1023 sdp = self.strdispatchers.get(name, StrDispatch())
1026 1024 sdp.add_s(str_key, f, priority )
1027 1025 self.strdispatchers[name] = sdp
1028 1026 return
1029 1027 if re_key is not None:
1030 1028 sdp = self.strdispatchers.get(name, StrDispatch())
1031 1029 sdp.add_re(re.compile(re_key), f, priority )
1032 1030 self.strdispatchers[name] = sdp
1033 1031 return
1034 1032
1035 1033 dp = getattr(self.hooks, name, None)
1036 1034 if name not in IPython.core.hooks.__all__:
1037 1035 print("Warning! Hook '%s' is not one of %s" % \
1038 1036 (name, IPython.core.hooks.__all__ ))
1039 1037
1040 1038 if name in IPython.core.hooks.deprecated:
1041 1039 alternative = IPython.core.hooks.deprecated[name]
1042 1040 raise ValueError(
1043 1041 "Hook {} has been deprecated since IPython 5.0. Use {} instead.".format(
1044 1042 name, alternative
1045 1043 )
1046 1044 )
1047 1045
1048 1046 if not dp:
1049 1047 dp = IPython.core.hooks.CommandChainDispatcher()
1050 1048
1051 1049 try:
1052 1050 dp.add(f,priority)
1053 1051 except AttributeError:
1054 1052 # it was not commandchain, plain old func - replace
1055 1053 dp = f
1056 1054
1057 1055 setattr(self.hooks,name, dp)
1058 1056
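A hedged example of registering a hook through this API; the 'editor' hook name and its `(self, filename, linenum, wait)` signature are assumptions about `IPython.core.hooks`, not guaranteed by the code above:

def vscode_editor(self, filename, linenum=None, wait=True):
    """Open files from %edit in VS Code (illustrative only)."""
    import subprocess
    return subprocess.call(["code", "--wait", "--goto", f"{filename}:{linenum or 1}"])

get_ipython().set_hook("editor", vscode_editor, priority=50)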
1059 1057 #-------------------------------------------------------------------------
1060 1058 # Things related to events
1061 1059 #-------------------------------------------------------------------------
1062 1060
1063 1061 def init_events(self):
1064 1062 self.events = EventManager(self, available_events)
1065 1063
1066 1064 self.events.register("pre_execute", self._clear_warning_registry)
1067 1065
1068 1066 def register_post_execute(self, func):
1069 1067 """DEPRECATED: Use ip.events.register('post_run_cell', func)
1070 1068
1071 1069 Register a function for calling after code execution.
1072 1070 """
1073 1071 raise ValueError(
1074 1072 "ip.register_post_execute is deprecated since IPython 1.0, use "
1075 1073 "ip.events.register('post_run_cell', func) instead."
1076 1074 )
1077 1075
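The replacement API named in the error message, sketched; that `post_run_cell` callbacks receive the ExecutionResult matches current IPython behaviour but is an assumption not shown in this file:

def log_cell(result):
    # `result` is the ExecutionResult of the cell that just ran
    print("cell ok:", result.success)

get_ipython().events.register("post_run_cell", log_cell)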
1078 1076 def _clear_warning_registry(self):
1079 1077 # clear the warning registry, so that different code blocks with
1080 1078 # overlapping line number ranges don't cause spurious suppression of
1081 1079 # warnings (see gh-6611 for details)
1082 1080 if "__warningregistry__" in self.user_global_ns:
1083 1081 del self.user_global_ns["__warningregistry__"]
1084 1082
1085 1083 #-------------------------------------------------------------------------
1086 1084 # Things related to the "main" module
1087 1085 #-------------------------------------------------------------------------
1088 1086
1089 1087 def new_main_mod(self, filename, modname):
1090 1088 """Return a new 'main' module object for user code execution.
1091 1089
1092 1090 ``filename`` should be the path of the script which will be run in the
1093 1091 module. Requests with the same filename will get the same module, with
1094 1092 its namespace cleared.
1095 1093
1096 1094 ``modname`` should be the module name - normally either '__main__' or
1097 1095 the basename of the file without the extension.
1098 1096
1099 1097 When scripts are executed via %run, we must keep a reference to their
1100 1098 __main__ module around so that Python doesn't
1101 1099 clear it, rendering references to module globals useless.
1102 1100
1103 1101 This method keeps said reference in a private dict, keyed by the
1104 1102 absolute path of the script. This way, for multiple executions of the
1105 1103 same script we only keep one copy of the namespace (the last one),
1106 1104 thus preventing memory leaks from old references while allowing the
1107 1105 objects from the last execution to be accessible.
1108 1106 """
1109 1107 filename = os.path.abspath(filename)
1110 1108 try:
1111 1109 main_mod = self._main_mod_cache[filename]
1112 1110 except KeyError:
1113 1111 main_mod = self._main_mod_cache[filename] = types.ModuleType(
1114 1112 modname,
1115 1113 doc="Module created for script run in IPython")
1116 1114 else:
1117 1115 main_mod.__dict__.clear()
1118 1116 main_mod.__name__ = modname
1119 1117
1120 1118 main_mod.__file__ = filename
1121 1119 # It seems pydoc (and perhaps others) needs any module instance to
1122 1120 # implement a __nonzero__ method
1123 1121 main_mod.__nonzero__ = lambda : True
1124 1122
1125 1123 return main_mod
1126 1124
1127 1125 def clear_main_mod_cache(self):
1128 1126 """Clear the cache of main modules.
1129 1127
1130 1128 Mainly for use by utilities like %reset.
1131 1129
1132 1130 Examples
1133 1131 --------
1134 1132 In [15]: import IPython
1135 1133
1136 1134 In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython')
1137 1135
1138 1136 In [17]: len(_ip._main_mod_cache) > 0
1139 1137 Out[17]: True
1140 1138
1141 1139 In [18]: _ip.clear_main_mod_cache()
1142 1140
1143 1141 In [19]: len(_ip._main_mod_cache) == 0
1144 1142 Out[19]: True
1145 1143 """
1146 1144 self._main_mod_cache.clear()
1147 1145
1148 1146 #-------------------------------------------------------------------------
1149 1147 # Things related to debugging
1150 1148 #-------------------------------------------------------------------------
1151 1149
1152 1150 def init_pdb(self):
1153 1151 # Set calling of pdb on exceptions
1154 1152 # self.call_pdb is a property
1155 1153 self.call_pdb = self.pdb
1156 1154
1157 1155 def _get_call_pdb(self):
1158 1156 return self._call_pdb
1159 1157
1160 1158 def _set_call_pdb(self,val):
1161 1159
1162 1160 if val not in (0,1,False,True):
1163 1161 raise ValueError('new call_pdb value must be boolean')
1164 1162
1165 1163 # store value in instance
1166 1164 self._call_pdb = val
1167 1165
1168 1166 # notify the actual exception handlers
1169 1167 self.InteractiveTB.call_pdb = val
1170 1168
1171 1169 call_pdb = property(_get_call_pdb,_set_call_pdb,None,
1172 1170 'Control auto-activation of pdb at exceptions')
1173 1171
1174 1172 def debugger(self,force=False):
1175 1173 """Call the pdb debugger.
1176 1174
1177 1175 Keywords:
1178 1176
1179 1177 - force(False): by default, this routine checks the instance call_pdb
1180 1178 flag and does not actually invoke the debugger if the flag is false.
1181 1179 The 'force' option forces the debugger to activate even if the flag
1182 1180 is false.
1183 1181 """
1184 1182
1185 1183 if not (force or self.call_pdb):
1186 1184 return
1187 1185
1188 1186 if not hasattr(sys,'last_traceback'):
1189 1187 error('No traceback has been produced, nothing to debug.')
1190 1188 return
1191 1189
1192 1190 self.InteractiveTB.debugger(force=True)
1193 1191
1194 1192 #-------------------------------------------------------------------------
1195 1193 # Things related to IPython's various namespaces
1196 1194 #-------------------------------------------------------------------------
1197 1195 default_user_namespaces = True
1198 1196
1199 1197 def init_create_namespaces(self, user_module=None, user_ns=None):
1200 1198 # Create the namespace where the user will operate. user_ns is
1201 1199 # normally the only one used, and it is passed to the exec calls as
1202 1200 # the locals argument. But we do carry a user_global_ns namespace
1203 1201 # given as the exec 'globals' argument, This is useful in embedding
1204 1202 # situations where the ipython shell opens in a context where the
1205 1203 # distinction between locals and globals is meaningful. For
1206 1204 # non-embedded contexts, it is just the same object as the user_ns dict.
1207 1205
1208 1206 # FIXME. For some strange reason, __builtins__ is showing up at user
1209 1207 # level as a dict instead of a module. This is a manual fix, but I
1210 1208 # should really track down where the problem is coming from. Alex
1211 1209 # Schmolck reported this problem first.
1212 1210
1213 1211 # A useful post by Alex Martelli on this topic:
1214 1212 # Re: inconsistent value from __builtins__
1215 1213 # Von: Alex Martelli <aleaxit@yahoo.com>
1216 1214 # Datum: Freitag 01 Oktober 2004 04:45:34 nachmittags/abends
1217 1215 # Gruppen: comp.lang.python
1218 1216
1219 1217 # Michael Hohn <hohn@hooknose.lbl.gov> wrote:
1220 1218 # > >>> print type(builtin_check.get_global_binding('__builtins__'))
1221 1219 # > <type 'dict'>
1222 1220 # > >>> print type(__builtins__)
1223 1221 # > <type 'module'>
1224 1222 # > Is this difference in return value intentional?
1225 1223
1226 1224 # Well, it's documented that '__builtins__' can be either a dictionary
1227 1225 # or a module, and it's been that way for a long time. Whether it's
1228 1226 # intentional (or sensible), I don't know. In any case, the idea is
1229 1227 # that if you need to access the built-in namespace directly, you
1230 1228 # should start with "import __builtin__" (note, no 's') which will
1231 1229 # definitely give you a module. Yeah, it's somewhat confusing:-(.
1232 1230
1233 1231 # These routines return a properly built module and dict as needed by
1234 1232 # the rest of the code, and can also be used by extension writers to
1235 1233 # generate properly initialized namespaces.
1236 1234 if (user_ns is not None) or (user_module is not None):
1237 1235 self.default_user_namespaces = False
1238 1236 self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)
1239 1237
1240 1238 # A record of hidden variables we have added to the user namespace, so
1241 1239 # we can list later only variables defined in actual interactive use.
1242 1240 self.user_ns_hidden = {}
1243 1241
1244 1242 # Now that FakeModule produces a real module, we've run into a nasty
1245 1243 # problem: after script execution (via %run), the module where the user
1246 1244 # code ran is deleted. Now that this object is a true module (needed
1247 1245 # so doctest and other tools work correctly), the Python module
1248 1246 # teardown mechanism runs over it, and sets to None every variable
1249 1247 # present in that module. Top-level references to objects from the
1250 1248 # script survive, because the user_ns is updated with them. However,
1251 1249 # calling functions defined in the script that use other things from
1252 1250 # the script will fail, because the function's closure had references
1253 1251 # to the original objects, which are now all None. So we must protect
1254 1252 # these modules from deletion by keeping a cache.
1255 1253 #
1256 1254 # To avoid keeping stale modules around (we only need the one from the
1257 1255 # last run), we use a dict keyed with the full path to the script, so
1258 1256 # only the last version of the module is held in the cache. Note,
1259 1257 # however, that we must cache the module *namespace contents* (their
1260 1258 # __dict__). Because if we try to cache the actual modules, old ones
1261 1259 # (uncached) could be destroyed while still holding references (such as
1262 1260 # those held by GUI objects that tend to be long-lived).
1263 1261 #
1264 1262 # The %reset command will flush this cache. See the cache_main_mod()
1265 1263 # and clear_main_mod_cache() methods for details on use.
1266 1264
1267 1265 # This is the cache used for 'main' namespaces
1268 1266 self._main_mod_cache = {}
1269 1267
1270 1268 # A table holding all the namespaces IPython deals with, so that
1271 1269 # introspection facilities can search easily.
1272 1270 self.ns_table = {'user_global':self.user_module.__dict__,
1273 1271 'user_local':self.user_ns,
1274 1272 'builtin':builtin_mod.__dict__
1275 1273 }
1276 1274
1277 1275 @property
1278 1276 def user_global_ns(self):
1279 1277 return self.user_module.__dict__
1280 1278
1281 1279 def prepare_user_module(self, user_module=None, user_ns=None):
1282 1280 """Prepare the module and namespace in which user code will be run.
1283 1281
1284 1282 When IPython is started normally, both parameters are None: a new module
1285 1283 is created automatically, and its __dict__ used as the namespace.
1286 1284
1287 1285 If only user_module is provided, its __dict__ is used as the namespace.
1288 1286 If only user_ns is provided, a dummy module is created, and user_ns
1289 1287 becomes the global namespace. If both are provided (as they may be
1290 1288 when embedding), user_ns is the local namespace, and user_module
1291 1289 provides the global namespace.
1292 1290
1293 1291 Parameters
1294 1292 ----------
1295 1293 user_module : module, optional
1296 1294 The current user module in which IPython is being run. If None,
1297 1295 a clean module will be created.
1298 1296 user_ns : dict, optional
1299 1297 A namespace in which to run interactive commands.
1300 1298
1301 1299 Returns
1302 1300 -------
1303 1301 A tuple of user_module and user_ns, each properly initialised.
1304 1302 """
1305 1303 if user_module is None and user_ns is not None:
1306 1304 user_ns.setdefault("__name__", "__main__")
1307 1305 user_module = DummyMod()
1308 1306 user_module.__dict__ = user_ns
1309 1307
1310 1308 if user_module is None:
1311 1309 user_module = types.ModuleType("__main__",
1312 1310 doc="Automatically created module for IPython interactive environment")
1313 1311
1314 1312 # We must ensure that __builtin__ (without the final 's') is always
1315 1313 # available and pointing to the __builtin__ *module*. For more details:
1316 1314 # http://mail.python.org/pipermail/python-dev/2001-April/014068.html
1317 1315 user_module.__dict__.setdefault('__builtin__', builtin_mod)
1318 1316 user_module.__dict__.setdefault('__builtins__', builtin_mod)
1319 1317
1320 1318 if user_ns is None:
1321 1319 user_ns = user_module.__dict__
1322 1320
1323 1321 return user_module, user_ns
1324 1322
1325 1323 def init_sys_modules(self):
1326 1324 # We need to insert into sys.modules something that looks like a
1327 1325 # module but which accesses the IPython namespace, for shelve and
1328 1326 # pickle to work interactively. Normally they rely on getting
1329 1327 # everything out of __main__, but for embedding purposes each IPython
1330 1328 # instance has its own private namespace, so we can't go shoving
1331 1329 # everything into __main__.
1332 1330
1333 1331 # note, however, that we should only do this for non-embedded
1334 1332 # ipythons, which really mimic the __main__.__dict__ with their own
1335 1333 # namespace. Embedded instances, on the other hand, should not do
1336 1334 # this because they need to manage the user local/global namespaces
1337 1335 # only, but they live within a 'normal' __main__ (meaning, they
1338 1336 # shouldn't overtake the execution environment of the script they're
1339 1337 # embedded in).
1340 1338
1341 1339 # This is overridden in the InteractiveShellEmbed subclass to a no-op.
1342 1340 main_name = self.user_module.__name__
1343 1341 sys.modules[main_name] = self.user_module
1344 1342
1345 1343 def init_user_ns(self):
1346 1344 """Initialize all user-visible namespaces to their minimum defaults.
1347 1345
1348 1346 Certain history lists are also initialized here, as they effectively
1349 1347 act as user namespaces.
1350 1348
1351 1349 Notes
1352 1350 -----
1353 1351 All data structures here are only filled in, they are NOT reset by this
1354 1352 method. If they were not empty before, data will simply be added to
1355 1353 them.
1356 1354 """
1357 1355 # This function works in two parts: first we put a few things in
1358 1356 # user_ns, and we sync those contents into user_ns_hidden so that these
1359 1357 # initial variables aren't shown by %who. After the sync, we add the
1360 1358 # rest of what we *do* want the user to see with %who even on a new
1361 1359 # session (probably nothing, so they really only see their own stuff)
1362 1360
1363 1361 # The user dict must *always* have a __builtin__ reference to the
1364 1362 # Python standard __builtin__ namespace, which must be imported.
1365 1363 # This is so that certain operations in prompt evaluation can be
1366 1364 # reliably executed with builtins. Note that we can NOT use
1367 1365 # __builtins__ (note the 's'), because that can either be a dict or a
1368 1366 # module, and can even mutate at runtime, depending on the context
1369 1367 # (Python makes no guarantees on it). In contrast, __builtin__ is
1370 1368 # always a module object, though it must be explicitly imported.
1371 1369
1372 1370 # For more details:
1373 1371 # http://mail.python.org/pipermail/python-dev/2001-April/014068.html
1374 1372 ns = {}
1375 1373
1376 1374 # make global variables for user access to the histories
1377 1375 ns['_ih'] = self.history_manager.input_hist_parsed
1378 1376 ns['_oh'] = self.history_manager.output_hist
1379 1377 ns['_dh'] = self.history_manager.dir_hist
1380 1378
1381 1379 # user aliases to input and output histories. These shouldn't show up
1382 1380 # in %who, as they can have very large reprs.
1383 1381 ns['In'] = self.history_manager.input_hist_parsed
1384 1382 ns['Out'] = self.history_manager.output_hist
1385 1383
1386 1384 # Store myself as the public api!!!
1387 1385 ns['get_ipython'] = self.get_ipython
1388 1386
1389 1387 ns['exit'] = self.exiter
1390 1388 ns['quit'] = self.exiter
1391 1389 ns["open"] = _modified_open
1392 1390
1393 1391 # Sync what we've added so far to user_ns_hidden so these aren't seen
1394 1392 # by %who
1395 1393 self.user_ns_hidden.update(ns)
1396 1394
1397 1395 # Anything put into ns now would show up in %who. Think twice before
1398 1396 # putting anything here, as we really want %who to show the user their
1399 1397 # stuff, not our variables.
1400 1398
1401 1399 # Finally, update the real user's namespace
1402 1400 self.user_ns.update(ns)
1403 1401
1404 1402 @property
1405 1403 def all_ns_refs(self):
1406 1404 """Get a list of references to all the namespace dictionaries in which
1407 1405 IPython might store a user-created object.
1408 1406
1409 1407 Note that this does not include the displayhook, which also caches
1410 1408 objects from the output."""
1411 1409 return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
1412 1410 [m.__dict__ for m in self._main_mod_cache.values()]
1413 1411
1414 1412 def reset(self, new_session=True, aggressive=False):
1415 1413 """Clear all internal namespaces, and attempt to release references to
1416 1414 user objects.
1417 1415
1418 1416 If new_session is True, a new history session will be opened.
1419 1417 """
1420 1418 # Clear histories
1421 1419 assert self.history_manager is not None
1422 1420 self.history_manager.reset(new_session)
1423 1421 # Reset counter used to index all histories
1424 1422 if new_session:
1425 1423 self.execution_count = 1
1426 1424
1427 1425 # Reset last execution result
1428 1426 self.last_execution_succeeded = True
1429 1427 self.last_execution_result = None
1430 1428
1431 1429 # Flush cached output items
1432 1430 if self.displayhook.do_full_cache:
1433 1431 self.displayhook.flush()
1434 1432
1435 1433 # The main execution namespaces must be cleared very carefully,
1436 1434 # skipping the deletion of the builtin-related keys, because doing so
1437 1435 # would cause errors in many objects' __del__ methods.
1438 1436 if self.user_ns is not self.user_global_ns:
1439 1437 self.user_ns.clear()
1440 1438 ns = self.user_global_ns
1441 1439 drop_keys = set(ns.keys())
1442 1440 drop_keys.discard('__builtin__')
1443 1441 drop_keys.discard('__builtins__')
1444 1442 drop_keys.discard('__name__')
1445 1443 for k in drop_keys:
1446 1444 del ns[k]
1447 1445
1448 1446 self.user_ns_hidden.clear()
1449 1447
1450 1448 # Restore the user namespaces to minimal usability
1451 1449 self.init_user_ns()
1452 1450 if aggressive and not hasattr(self, "_sys_modules_keys"):
1453 1451 print("Cannot restore sys.module, no snapshot")
1454 1452 elif aggressive:
1455 1453 print("culling sys module...")
1456 1454 current_keys = set(sys.modules.keys())
1457 1455 for k in current_keys - self._sys_modules_keys:
1458 1456 if k.startswith("multiprocessing"):
1459 1457 continue
1460 1458 del sys.modules[k]
1461 1459
1462 1460 # Restore the default and user aliases
1463 1461 self.alias_manager.clear_aliases()
1464 1462 self.alias_manager.init_aliases()
1465 1463
1466 1464 # Now define aliases that only make sense on the terminal, because they
1467 1465 # need direct access to the console in a way that we can't emulate in
1468 1466 # GUI or web frontend
1469 1467 if os.name == 'posix':
1470 1468 for cmd in ('clear', 'more', 'less', 'man'):
1471 1469 if cmd not in self.magics_manager.magics['line']:
1472 1470 self.alias_manager.soft_define_alias(cmd, cmd)
1473 1471
1474 1472 # Flush the private list of module references kept for script
1475 1473 # execution protection
1476 1474 self.clear_main_mod_cache()
1477 1475
1478 1476 def del_var(self, varname, by_name=False):
1479 1477 """Delete a variable from the various namespaces, so that, as
1480 1478 far as possible, we're not keeping any hidden references to it.
1481 1479
1482 1480 Parameters
1483 1481 ----------
1484 1482 varname : str
1485 1483 The name of the variable to delete.
1486 1484 by_name : bool
1487 1485 If True, delete variables with the given name in each
1488 1486 namespace. If False (default), find the variable in the user
1489 1487 namespace, and delete references to it.
1490 1488 """
1491 1489 if varname in ('__builtin__', '__builtins__'):
1492 1490 raise ValueError("Refusing to delete %s" % varname)
1493 1491
1494 1492 ns_refs = self.all_ns_refs
1495 1493
1496 1494 if by_name: # Delete by name
1497 1495 for ns in ns_refs:
1498 1496 try:
1499 1497 del ns[varname]
1500 1498 except KeyError:
1501 1499 pass
1502 1500 else: # Delete by object
1503 1501 try:
1504 1502 obj = self.user_ns[varname]
1505 1503 except KeyError as e:
1506 1504 raise NameError("name '%s' is not defined" % varname) from e
1507 1505 # Also check in output history
1508 1506 assert self.history_manager is not None
1509 1507 ns_refs.append(self.history_manager.output_hist)
1510 1508 for ns in ns_refs:
1511 1509 to_delete = [n for n, o in ns.items() if o is obj]
1512 1510 for name in to_delete:
1513 1511 del ns[name]
1514 1512
1515 1513 # Ensure it is removed from the last execution result
1516 1514 if self.last_execution_result.result is obj:
1517 1515 self.last_execution_result = None
1518 1516
1519 1517 # displayhook keeps extra references, but not in a dictionary
1520 1518 for name in ('_', '__', '___'):
1521 1519 if getattr(self.displayhook, name) is obj:
1522 1520 setattr(self.displayhook, name, None)
1523 1521
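# Editor's illustrative sketch (assumes an interactive session where
# get_ipython() returns this shell): deleting by object removes every name
# bound to the same object, including entries in the output history.
#
#     ip = get_ipython()
#     ip.user_ns["a"] = ip.user_ns["b"] = object()
#     ip.del_var("a")                  # also removes "b" (same object)
#     ip.del_var("tmp", by_name=True)  # only names; missing ones are ignored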
1524 1522 def reset_selective(self, regex=None):
1525 1523 """Clear selective variables from internal namespaces based on a
1526 1524 specified regular expression.
1527 1525
1528 1526 Parameters
1529 1527 ----------
1530 1528 regex : string or compiled pattern, optional
1531 1529 A regular expression pattern that will be used in searching
1532 1530 variable names in the user's namespaces.
1533 1531 """
1534 1532 if regex is not None:
1535 1533 try:
1536 1534 m = re.compile(regex)
1537 1535 except TypeError as e:
1538 1536 raise TypeError('regex must be a string or compiled pattern') from e
1539 1537 # Search for keys in each namespace that match the given regex
1540 1538 # If a match is found, delete the key/value pair.
1541 1539 for ns in self.all_ns_refs:
1542 1540 for var in list(ns):  # iterate over a copy, since we delete from ns
1543 1541 if m.search(var):
1544 1542 del ns[var]
1545 1543
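# Editor's illustrative sketch: clear all user variables whose names match a
# regular expression (the pattern is searched, not anchored).
#
#     ip = get_ipython()
#     ip.user_ns.update({"tmp_a": 1, "tmp_b": 2, "keep": 3})
#     ip.reset_selective(r"^tmp_")   # removes tmp_a and tmp_b, keeps "keep"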
1546 1544 def push(self, variables, interactive=True):
1547 1545 """Inject a group of variables into the IPython user namespace.
1548 1546
1549 1547 Parameters
1550 1548 ----------
1551 1549 variables : dict, str or list/tuple of str
1552 1550 The variables to inject into the user's namespace. If a dict, a
1553 1551 simple update is done. If a str, the string is assumed to have
1554 1552 variable names separated by spaces. A list/tuple of str can also
1555 1553 be used to give the variable names. If just the variable names are
1556 1554 given (list/tuple/str) then the variable values are looked up in the
1557 1555 caller's frame.
1558 1556 interactive : bool
1559 1557 If True (default), the variables will be listed with the ``who``
1560 1558 magic.
1561 1559 """
1562 1560 vdict = None
1563 1561
1564 1562 # We need a dict of name/value pairs to do namespace updates.
1565 1563 if isinstance(variables, dict):
1566 1564 vdict = variables
1567 1565 elif isinstance(variables, (str, list, tuple)):
1568 1566 if isinstance(variables, str):
1569 1567 vlist = variables.split()
1570 1568 else:
1571 1569 vlist = variables
1572 1570 vdict = {}
1573 1571 cf = sys._getframe(1)
1574 1572 for name in vlist:
1575 1573 try:
1576 1574 vdict[name] = eval(name, cf.f_globals, cf.f_locals)
1577 1575 except:
1578 1576 print('Could not get variable %s from %s' %
1579 1577 (name,cf.f_code.co_name))
1580 1578 else:
1581 1579 raise ValueError('variables must be a dict/str/list/tuple')
1582 1580
1583 1581 # Propagate variables to user namespace
1584 1582 self.user_ns.update(vdict)
1585 1583
1586 1584 # And configure interactive visibility
1587 1585 user_ns_hidden = self.user_ns_hidden
1588 1586 if interactive:
1589 1587 for name in vdict:
1590 1588 user_ns_hidden.pop(name, None)
1591 1589 else:
1592 1590 user_ns_hidden.update(vdict)
1593 1591
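# Editor's illustrative sketch: push a dict, or names whose values are taken
# from the caller's frame; interactive=False hides them from %who.
#
#     ip = get_ipython()
#     ip.push({"a": 1})                 # "a" is now visible to %who
#     b = 2
#     ip.push("b", interactive=False)   # injected but hidden from %who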
1594 1592 def drop_by_id(self, variables):
1595 1593 """Remove a dict of variables from the user namespace, if they are the
1596 1594 same as the values in the dictionary.
1597 1595
1598 1596 This is intended for use by extensions: variables that they've added can
1599 1597 be taken back out if they are unloaded, without removing any that the
1600 1598 user has overwritten.
1601 1599
1602 1600 Parameters
1603 1601 ----------
1604 1602 variables : dict
1605 1603 A dictionary mapping object names (as strings) to the objects.
1606 1604 """
1607 1605 for name, obj in variables.items():
1608 1606 if name in self.user_ns and self.user_ns[name] is obj:
1609 1607 del self.user_ns[name]
1610 1608 self.user_ns_hidden.pop(name, None)
1611 1609
1612 1610 #-------------------------------------------------------------------------
1613 1611 # Things related to object introspection
1614 1612 #-------------------------------------------------------------------------
1615 1613 @staticmethod
1616 1614 def _find_parts(oname: str) -> Tuple[bool, ListType[str]]:
1617 1615 """
1618 1616 Given an object name, return a list of parts of this object name.
1619 1617
1620 1618 Basically split on dots when using attribute access,
1621 1619 and extract the value when using square brackets.
1622 1620
1623 1621
1624 1622 For example foo.bar[3].baz[x] -> foo, bar, 3, baz, x
1625 1623
1626 1624
1627 1625 Returns
1628 1626 -------
1629 1627 parts_ok: bool
1630 1628 whether we were properly able to parse parts.
1631 1629 parts: list of str
1632 1630 extracted parts
1633 1631
1634 1632
1635 1633
1636 1634 """
1637 1635 raw_parts = oname.split(".")
1638 1636 parts = []
1639 1637 parts_ok = True
1640 1638 for p in raw_parts:
1641 1639 if p.endswith("]"):
1642 1640 var, *indices = p.split("[")
1643 1641 if not var.isidentifier():
1644 1642 parts_ok = False
1645 1643 break
1646 1644 parts.append(var)
1647 1645 for ind in indices:
1648 1646 if ind[-1] != "]" and not is_integer_string(ind[:-1]):
1649 1647 parts_ok = False
1650 1648 break
1651 1649 parts.append(ind[:-1])
1652 1650 continue
1653 1651
1654 1652 if not p.isidentifier():
1655 1653 parts_ok = False
1656 1654 parts.append(p)
1657 1655
1658 1656 return parts_ok, parts
1659 1657
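# Editor's illustrative sketch of what _find_parts returns:
#
#     InteractiveShell._find_parts("foo.bar[3]")  # -> (True, ["foo", "bar", "3"])
#     InteractiveShell._find_parts("foo.bar[x]")  # -> (True, ["foo", "bar", "x"])
#     InteractiveShell._find_parts("3foo.bar")    # -> (False, ["3foo", "bar"])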
1660 1658 def _ofind(
1661 1659 self, oname: str, namespaces: Optional[Sequence[Tuple[str, AnyType]]] = None
1662 1660 ) -> OInfo:
1663 1661 """Find an object in the available namespaces.
1664 1662
1665 1663
1666 1664 Returns
1667 1665 -------
1668 1666 OInfo with fields:
1669 1667 - ismagic
1670 1668 - isalias
1671 1669 - found
1672 1670 - obj
1673 1671 - namespace
1674 1672 - parent
1675 1673
1676 1674 Has special code to detect magic functions.
1677 1675 """
1678 1676 oname = oname.strip()
1679 1677 parts_ok, parts = self._find_parts(oname)
1680 1678
1681 1679 if (
1682 1680 not oname.startswith(ESC_MAGIC)
1683 1681 and not oname.startswith(ESC_MAGIC2)
1684 1682 and not parts_ok
1685 1683 ):
1686 1684 return OInfo(
1687 1685 ismagic=False,
1688 1686 isalias=False,
1689 1687 found=False,
1690 1688 obj=None,
1691 1689 namespace=None,
1692 1690 parent=None,
1693 1691 )
1694 1692
1695 1693 if namespaces is None:
1696 1694 # Namespaces to search in:
1697 1695 # Put them in a list. The order is important so that we
1698 1696 # find things in the same order that Python finds them.
1699 1697 namespaces = [ ('Interactive', self.user_ns),
1700 1698 ('Interactive (global)', self.user_global_ns),
1701 1699 ('Python builtin', builtin_mod.__dict__),
1702 1700 ]
1703 1701
1704 1702 ismagic = False
1705 1703 isalias = False
1706 1704 found = False
1707 1705 ospace = None
1708 1706 parent = None
1709 1707 obj = None
1710 1708
1711 1709
1712 1710 # Look for the given name by splitting it in parts. If the head is
1713 1711 # found, then we look for all the remaining parts as members, and only
1714 1712 # declare success if we can find them all.
1715 1713 oname_parts = parts
1716 1714 oname_head, oname_rest = oname_parts[0],oname_parts[1:]
1717 1715 for nsname,ns in namespaces:
1718 1716 try:
1719 1717 obj = ns[oname_head]
1720 1718 except KeyError:
1721 1719 continue
1722 1720 else:
1723 1721 for idx, part in enumerate(oname_rest):
1724 1722 try:
1725 1723 parent = obj
1726 1724 # The last part is looked up in a special way to avoid
1727 1725 # descriptor invocation as it may raise or have side
1728 1726 # effects.
1729 1727 if idx == len(oname_rest) - 1:
1730 1728 obj = self._getattr_property(obj, part)
1731 1729 else:
1732 1730 if is_integer_string(part):
1733 1731 obj = obj[int(part)]
1734 1732 else:
1735 1733 obj = getattr(obj, part)
1736 1734 except:
1737 1735 # Blanket except b/c some badly implemented objects
1738 1736 # allow __getattr__ to raise exceptions other than
1739 1737 # AttributeError, which then crashes IPython.
1740 1738 break
1741 1739 else:
1742 1740 # If we finish the for loop (no break), we got all members
1743 1741 found = True
1744 1742 ospace = nsname
1745 1743 break # namespace loop
1746 1744
1747 1745 # Try to see if it's magic
1748 1746 if not found:
1749 1747 obj = None
1750 1748 if oname.startswith(ESC_MAGIC2):
1751 1749 oname = oname.lstrip(ESC_MAGIC2)
1752 1750 obj = self.find_cell_magic(oname)
1753 1751 elif oname.startswith(ESC_MAGIC):
1754 1752 oname = oname.lstrip(ESC_MAGIC)
1755 1753 obj = self.find_line_magic(oname)
1756 1754 else:
1757 1755 # search without prefix, so run? will find %run?
1758 1756 obj = self.find_line_magic(oname)
1759 1757 if obj is None:
1760 1758 obj = self.find_cell_magic(oname)
1761 1759 if obj is not None:
1762 1760 found = True
1763 1761 ospace = 'IPython internal'
1764 1762 ismagic = True
1765 1763 isalias = isinstance(obj, Alias)
1766 1764
1767 1765 # Last try: special-case some literals like '', [], {}, etc:
1768 1766 if not found and oname_head in ["''",'""','[]','{}','()']:
1769 1767 obj = eval(oname_head)
1770 1768 found = True
1771 1769 ospace = 'Interactive'
1772 1770
1773 1771 return OInfo(
1774 1772 obj=obj,
1775 1773 found=found,
1776 1774 parent=parent,
1777 1775 ismagic=ismagic,
1778 1776 isalias=isalias,
1779 1777 namespace=ospace,
1780 1778 )
1781 1779
1782 1780 @staticmethod
1783 1781 def _getattr_property(obj, attrname):
1784 1782 """Property-aware getattr to use in object finding.
1785 1783
1786 1784 If attrname represents a property, return it unevaluated (in case it has
1787 1785 side effects or raises an error).
1788 1786
1789 1787 """
1790 1788 if not isinstance(obj, type):
1791 1789 try:
1792 1790 # `getattr(type(obj), attrname)` is not guaranteed to return
1793 1791 # `obj`, but does so for property:
1794 1792 #
1795 1793 # property.__get__(self, None, cls) -> self
1796 1794 #
1797 1795 # The universal alternative is to traverse the mro manually
1798 1796 # searching for attrname in class dicts.
1799 1797 if is_integer_string(attrname):
1800 1798 return obj[int(attrname)]
1801 1799 else:
1802 1800 attr = getattr(type(obj), attrname)
1803 1801 except AttributeError:
1804 1802 pass
1805 1803 else:
1806 1804 # This relies on the fact that data descriptors (with both
1807 1805 # __get__ & __set__ magic methods) take precedence over
1808 1806 # instance-level attributes:
1809 1807 #
1810 1808 # class A(object):
1811 1809 # @property
1812 1810 # def foobar(self): return 123
1813 1811 # a = A()
1814 1812 # a.__dict__['foobar'] = 345
1815 1813 # a.foobar # == 123
1816 1814 #
1817 1815 # So, a property may be returned right away.
1818 1816 if isinstance(attr, property):
1819 1817 return attr
1820 1818
1821 1819 # Nothing helped, fall back.
1822 1820 return getattr(obj, attrname)
1823 1821
1824 1822 def _object_find(self, oname, namespaces=None) -> OInfo:
1825 1823 """Find an object and return a struct with info about it."""
1826 1824 return self._ofind(oname, namespaces)
1827 1825
1828 1826 def _inspect(self, meth, oname: str, namespaces=None, **kw):
1829 1827 """Generic interface to the inspector system.
1830 1828
1831 1829 This function is meant to be called by pdef, pdoc & friends.
1832 1830 """
1833 1831 info: OInfo = self._object_find(oname, namespaces)
1834 1832 if self.sphinxify_docstring:
1835 1833 if sphinxify is None:
1836 1834 raise ImportError("Module ``docrepr`` required but missing")
1837 1835 docformat = sphinxify(self.object_inspect(oname))
1838 1836 else:
1839 1837 docformat = None
1840 1838 if info.found or hasattr(info.parent, oinspect.HOOK_NAME):
1841 1839 pmethod = getattr(self.inspector, meth)
1842 1840 # TODO: only apply format_screen to the plain/text repr of the mime
1843 1841 # bundle.
1844 1842 formatter = format_screen if info.ismagic else docformat
1845 1843 if meth == 'pdoc':
1846 1844 pmethod(info.obj, oname, formatter)
1847 1845 elif meth == 'pinfo':
1848 1846 pmethod(
1849 1847 info.obj,
1850 1848 oname,
1851 1849 formatter,
1852 1850 info,
1853 1851 enable_html_pager=self.enable_html_pager,
1854 1852 **kw,
1855 1853 )
1856 1854 else:
1857 1855 pmethod(info.obj, oname)
1858 1856 else:
1859 1857 print('Object `%s` not found.' % oname)
1860 1858 return 'not found' # so callers can take other action
1861 1859
1862 1860 def object_inspect(self, oname, detail_level=0):
1863 1861 """Get object info about oname"""
1864 1862 with self.builtin_trap:
1865 1863 info = self._object_find(oname)
1866 1864 if info.found:
1867 1865 return self.inspector.info(info.obj, oname, info=info,
1868 1866 detail_level=detail_level
1869 1867 )
1870 1868 else:
1871 1869 return oinspect.object_info(name=oname, found=False)
1872 1870
1873 1871 def object_inspect_text(self, oname, detail_level=0):
1874 1872 """Get object info as formatted text"""
1875 1873 return self.object_inspect_mime(oname, detail_level)['text/plain']
1876 1874
1877 1875 def object_inspect_mime(self, oname, detail_level=0, omit_sections=()):
1878 1876 """Get object info as a mimebundle of formatted representations.
1879 1877
1880 1878 A mimebundle is a dictionary, keyed by mime-type.
1881 1879 It must always have the key `'text/plain'`.
1882 1880 """
1883 1881 with self.builtin_trap:
1884 1882 info = self._object_find(oname)
1885 1883 if info.found:
1886 1884 docformat = (
1887 1885 sphinxify(self.object_inspect(oname))
1888 1886 if self.sphinxify_docstring
1889 1887 else None
1890 1888 )
1891 1889 return self.inspector._get_info(
1892 1890 info.obj,
1893 1891 oname,
1894 1892 info=info,
1895 1893 detail_level=detail_level,
1896 1894 formatter=docformat,
1897 1895 omit_sections=omit_sections,
1898 1896 )
1899 1897 else:
1900 1898 raise KeyError(oname)
1901 1899
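# Editor's illustrative sketch: the returned mimebundle always carries a
# 'text/plain' entry; unknown names raise KeyError.
#
#     ip = get_ipython()
#     bundle = ip.object_inspect_mime("len")
#     print(bundle["text/plain"])             # same text as object_inspect_text("len")
#     ip.object_inspect_mime("no_such_name")  # raises KeyError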
1902 1900 #-------------------------------------------------------------------------
1903 1901 # Things related to history management
1904 1902 #-------------------------------------------------------------------------
1905 1903
1906 1904 def init_history(self):
1907 1905 """Sets up the command history, and starts regular autosaves."""
1908 1906 self.history_manager = HistoryManager(shell=self, parent=self)
1909 1907 self.configurables.append(self.history_manager)
1910 1908
1911 1909 #-------------------------------------------------------------------------
1912 1910 # Things related to exception handling and tracebacks (not debugging)
1913 1911 #-------------------------------------------------------------------------
1914 1912
1915 1913 debugger_cls = InterruptiblePdb
1916 1914
1917 1915 def init_traceback_handlers(self, custom_exceptions):
1918 1916 # Syntax error handler.
1919 1917 self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor', parent=self)
1920 1918
1921 1919 # The interactive one is initialized with an offset, meaning we always
1922 1920 # want to remove the topmost item in the traceback, which is our own
1923 1921 # internal code. Valid modes: ['Plain','Context','Verbose','Minimal']
1924 1922 self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain',
1925 1923 color_scheme='NoColor',
1926 1924 tb_offset = 1,
1927 1925 debugger_cls=self.debugger_cls, parent=self)
1928 1926
1929 1927 # The instance will store a pointer to the system-wide exception hook,
1930 1928 # so that runtime code (such as magics) can access it. This is because
1931 1929 # during the read-eval loop, it may get temporarily overwritten.
1932 1930 self.sys_excepthook = sys.excepthook
1933 1931
1934 1932 # and add any custom exception handlers the user may have specified
1935 1933 self.set_custom_exc(*custom_exceptions)
1936 1934
1937 1935 # Set the exception mode
1938 1936 self.InteractiveTB.set_mode(mode=self.xmode)
1939 1937
1940 1938 def set_custom_exc(self, exc_tuple, handler):
1941 1939 """set_custom_exc(exc_tuple, handler)
1942 1940
1943 1941 Set a custom exception handler, which will be called if any of the
1944 1942 exceptions in exc_tuple occur in the mainloop (specifically, in the
1945 1943 run_code() method).
1946 1944
1947 1945 Parameters
1948 1946 ----------
1949 1947 exc_tuple : tuple of exception classes
1950 1948 A *tuple* of exception classes, for which to call the defined
1951 1949 handler. It is very important that you use a tuple, and NOT A
1952 1950 LIST here, because of the way Python's except statement works. If
1953 1951 you only want to trap a single exception, use a singleton tuple::
1954 1952
1955 1953 exc_tuple = (MyCustomException,)
1956 1954
1957 1955 handler : callable
1958 1956 handler must have the following signature::
1959 1957
1960 1958 def my_handler(self, etype, value, tb, tb_offset=None):
1961 1959 ...
1962 1960 return structured_traceback
1963 1961
1964 1962 Your handler must return a structured traceback (a list of strings),
1965 1963 or None.
1966 1964
1967 1965 This will be made into an instance method (via types.MethodType)
1968 1966 of IPython itself, and it will be called if any of the exceptions
1969 1967 listed in the exc_tuple are caught. If the handler is None, an
1970 1968 internal basic one is used, which just prints basic info.
1971 1969
1972 1970 To protect IPython from crashes, if your handler ever raises an
1973 1971 exception or returns an invalid result, it will be immediately
1974 1972 disabled.
1975 1973
1976 1974 Notes
1977 1975 -----
1978 1976 WARNING: by putting your own exception handler into IPython's main
1979 1977 execution loop, you run a very good chance of nasty crashes. This
1980 1978 facility should only be used if you really know what you are doing.
1981 1979 """
1982 1980
1983 1981 if not isinstance(exc_tuple, tuple):
1984 1982 raise TypeError("The custom exceptions must be given as a tuple.")
1985 1983
1986 1984 def dummy_handler(self, etype, value, tb, tb_offset=None):
1987 1985 print('*** Simple custom exception handler ***')
1988 1986 print('Exception type :', etype)
1989 1987 print('Exception value:', value)
1990 1988 print('Traceback :', tb)
1991 1989
1992 1990 def validate_stb(stb):
1993 1991 """validate structured traceback return type
1994 1992
1995 1993 return type of CustomTB *should* be a list of strings, but allow
1996 1994 single strings or None, which are harmless.
1997 1995
1998 1996 This function will *always* return a list of strings,
1999 1997 and will raise a TypeError if stb is inappropriate.
2000 1998 """
2001 1999 msg = "CustomTB must return list of strings, not %r" % stb
2002 2000 if stb is None:
2003 2001 return []
2004 2002 elif isinstance(stb, str):
2005 2003 return [stb]
2006 2004 elif not isinstance(stb, list):
2007 2005 raise TypeError(msg)
2008 2006 # it's a list
2009 2007 for line in stb:
2010 2008 # check every element
2011 2009 if not isinstance(line, str):
2012 2010 raise TypeError(msg)
2013 2011 return stb
2014 2012
2015 2013 if handler is None:
2016 2014 wrapped = dummy_handler
2017 2015 else:
2018 2016 def wrapped(self,etype,value,tb,tb_offset=None):
2019 2017 """wrap CustomTB handler, to protect IPython from user code
2020 2018
2021 2019 This makes it harder (but not impossible) for custom exception
2022 2020 handlers to crash IPython.
2023 2021 """
2024 2022 try:
2025 2023 stb = handler(self,etype,value,tb,tb_offset=tb_offset)
2026 2024 return validate_stb(stb)
2027 2025 except:
2028 2026 # clear custom handler immediately
2029 2027 self.set_custom_exc((), None)
2030 2028 print("Custom TB Handler failed, unregistering", file=sys.stderr)
2031 2029 # show the exception in handler first
2032 2030 stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
2033 2031 print(self.InteractiveTB.stb2text(stb))
2034 2032 print("The original exception:")
2035 2033 stb = self.InteractiveTB.structured_traceback(
2036 2034 etype, value, tb, tb_offset=tb_offset
2037 2035 )
2038 2036 return stb
2039 2037
2040 2038 self.CustomTB = types.MethodType(wrapped,self)
2041 2039 self.custom_exceptions = exc_tuple
2042 2040
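# Editor's illustrative sketch: register a handler for a single exception
# type (note the singleton tuple) that returns a structured traceback.
#
#     def my_handler(self, etype, value, tb, tb_offset=None):
#         print("Custom handling of", etype.__name__)
#         return self.InteractiveTB.structured_traceback(
#             etype, value, tb, tb_offset=tb_offset)
#
#     get_ipython().set_custom_exc((ValueError,), my_handler)
#     get_ipython().set_custom_exc((), None)   # unregister again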
2043 2041 def excepthook(self, etype, value, tb):
2044 2042 """One more defense for GUI apps that call sys.excepthook.
2045 2043
2046 2044 GUI frameworks like wxPython trap exceptions and call
2047 2045 sys.excepthook themselves. I guess this is a feature that
2048 2046 enables them to keep running after exceptions that would
2049 2047 otherwise kill their mainloop. This is a bother for IPython
2050 2048 which expects to catch all of the program exceptions with a try:
2051 2049 except: statement.
2052 2050
2053 2051 Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
2054 2052 any app directly invokes sys.excepthook, it will look to the user like
2055 2053 IPython crashed. In order to work around this, we can disable the
2056 2054 CrashHandler and replace it with this excepthook instead, which prints a
2057 2055 regular traceback using our InteractiveTB. In this fashion, apps which
2058 2056 call sys.excepthook will generate a regular-looking exception from
2059 2057 IPython, and the CrashHandler will only be triggered by real IPython
2060 2058 crashes.
2061 2059
2062 2060 This hook should be used sparingly, only in places which are not likely
2063 2061 to be true IPython errors.
2064 2062 """
2065 2063 self.showtraceback((etype, value, tb), tb_offset=0)
2066 2064
2067 2065 def _get_exc_info(self, exc_tuple=None):
2068 2066 """get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
2069 2067
2070 2068 Ensures sys.last_type,value,traceback hold the exc_info we found,
2071 2069 from whichever source.
2072 2070
2073 2071 raises ValueError if none of these contain any information
2074 2072 """
2075 2073 if exc_tuple is None:
2076 2074 etype, value, tb = sys.exc_info()
2077 2075 else:
2078 2076 etype, value, tb = exc_tuple
2079 2077
2080 2078 if etype is None:
2081 2079 if hasattr(sys, 'last_type'):
2082 2080 etype, value, tb = sys.last_type, sys.last_value, \
2083 2081 sys.last_traceback
2084 2082
2085 2083 if etype is None:
2086 2084 raise ValueError("No exception to find")
2087 2085
2088 2086 # Now store the exception info in sys.last_type etc.
2089 2087 # WARNING: these variables are somewhat deprecated and not
2090 2088 # necessarily safe to use in a threaded environment, but tools
2091 2089 # like pdb depend on their existence, so let's set them. If we
2092 2090 # find problems in the field, we'll need to revisit their use.
2093 2091 sys.last_type = etype
2094 2092 sys.last_value = value
2095 2093 sys.last_traceback = tb
2096 2094
2097 2095 return etype, value, tb
2098 2096
2099 2097 def show_usage_error(self, exc):
2100 2098 """Show a short message for UsageErrors
2101 2099
2102 2100 These are special exceptions that shouldn't show a traceback.
2103 2101 """
2104 2102 print("UsageError: %s" % exc, file=sys.stderr)
2105 2103
2106 2104 def get_exception_only(self, exc_tuple=None):
2107 2105 """
2108 2106 Return as a string (ending with a newline) the exception that
2109 2107 just occurred, without any traceback.
2110 2108 """
2111 2109 etype, value, tb = self._get_exc_info(exc_tuple)
2112 2110 msg = traceback.format_exception_only(etype, value)
2113 2111 return ''.join(msg)
2114 2112
2115 2113 def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
2116 2114 exception_only=False, running_compiled_code=False):
2117 2115 """Display the exception that just occurred.
2118 2116
2119 2117 If nothing is known about the exception, this is the method which
2120 2118 should be used throughout the code for presenting user tracebacks,
2121 2119 rather than directly invoking the InteractiveTB object.
2122 2120
2123 2121 A specific showsyntaxerror() also exists, but this method can take
2124 2122 care of calling it if needed, so unless you are explicitly catching a
2125 2123 SyntaxError exception, don't try to analyze the stack manually and
2126 2124 simply call this method."""
2127 2125
2128 2126 try:
2129 2127 try:
2130 2128 etype, value, tb = self._get_exc_info(exc_tuple)
2131 2129 except ValueError:
2132 2130 print('No traceback available to show.', file=sys.stderr)
2133 2131 return
2134 2132
2135 2133 if issubclass(etype, SyntaxError):
2136 2134 # Though this won't be called by syntax errors in the input
2137 2135 # line, there may be SyntaxError cases with imported code.
2138 2136 self.showsyntaxerror(filename, running_compiled_code)
2139 2137 elif etype is UsageError:
2140 2138 self.show_usage_error(value)
2141 2139 else:
2142 2140 if exception_only:
2143 2141 stb = ['An exception has occurred, use %tb to see '
2144 2142 'the full traceback.\n']
2145 2143 stb.extend(self.InteractiveTB.get_exception_only(etype,
2146 2144 value))
2147 2145 else:
2148 2146
2149 2147 def contains_exceptiongroup(val):
2150 2148 if val is None:
2151 2149 return False
2152 2150 return isinstance(
2153 2151 val, BaseExceptionGroup
2154 2152 ) or contains_exceptiongroup(val.__context__)
2155 2153
2156 2154 if contains_exceptiongroup(value):
2157 2155 # fall back to native exception formatting until ultratb
2158 2156 # supports exception groups
2159 2157 traceback.print_exc()
2160 2158 else:
2161 2159 try:
2162 2160 # Exception classes can customise their traceback - we
2163 2161 # use this in IPython.parallel for exceptions occurring
2164 2162 # in the engines. This should return a list of strings.
2165 2163 if hasattr(value, "_render_traceback_"):
2166 2164 stb = value._render_traceback_()
2167 2165 else:
2168 2166 stb = self.InteractiveTB.structured_traceback(
2169 2167 etype, value, tb, tb_offset=tb_offset
2170 2168 )
2171 2169
2172 2170 except Exception:
2173 2171 print(
2174 2172 "Unexpected exception formatting exception. Falling back to standard exception"
2175 2173 )
2176 2174 traceback.print_exc()
2177 2175 return None
2178 2176
2179 2177 self._showtraceback(etype, value, stb)
2180 2178 if self.call_pdb:
2181 2179 # drop into debugger
2182 2180 self.debugger(force=True)
2183 2181 return
2184 2182
2185 2183 # Actually show the traceback
2186 2184 self._showtraceback(etype, value, stb)
2187 2185
2188 2186 except KeyboardInterrupt:
2189 2187 print('\n' + self.get_exception_only(), file=sys.stderr)
2190 2188
2191 2189 def _showtraceback(self, etype, evalue, stb: str):
2192 2190 """Actually show a traceback.
2193 2191
2194 2192 Subclasses may override this method to put the traceback in a different
2195 2193 place, like a side channel.
2196 2194 """
2197 2195 val = self.InteractiveTB.stb2text(stb)
2198 2196 try:
2199 2197 print(val)
2200 2198 except UnicodeEncodeError:
2201 2199 print(val.encode("utf-8", "backslashreplace").decode())
2202 2200
2203 2201 def showsyntaxerror(self, filename=None, running_compiled_code=False):
2204 2202 """Display the syntax error that just occurred.
2205 2203
2206 2204 This doesn't display a stack trace because there isn't one.
2207 2205
2208 2206 If a filename is given, it is stuffed in the exception instead
2209 2207 of what was there before (because Python's parser always uses
2210 2208 "<string>" when reading from a string).
2211 2209
2212 2210 If the syntax error occurred when running compiled code (i.e. running_compiled_code=True),
2213 2211 a longer stack trace will be displayed.
2214 2212 """
2215 2213 etype, value, last_traceback = self._get_exc_info()
2216 2214
2217 2215 if filename and issubclass(etype, SyntaxError):
2218 2216 try:
2219 2217 value.filename = filename
2220 2218 except:
2221 2219 # Not the format we expect; leave it alone
2222 2220 pass
2223 2221
2224 2222 # If the error occurred when executing compiled code, we should provide full stacktrace.
2225 2223 elist = traceback.extract_tb(last_traceback) if running_compiled_code else []
2226 2224 stb = self.SyntaxTB.structured_traceback(etype, value, elist)
2227 2225 self._showtraceback(etype, value, stb)
2228 2226
2229 2227 # This is overridden in TerminalInteractiveShell to show a message about
2230 2228 # the %paste magic.
2231 2229 def showindentationerror(self):
2232 2230 """Called by _run_cell when there's an IndentationError in code entered
2233 2231 at the prompt.
2234 2232
2235 2233 This is overridden in TerminalInteractiveShell to show a message about
2236 2234 the %paste magic."""
2237 2235 self.showsyntaxerror()
2238 2236
2239 2237 @skip_doctest
2240 2238 def set_next_input(self, s, replace=False):
2241 2239 """ Sets the 'default' input string for the next command line.
2242 2240
2243 2241 Example::
2244 2242
2245 2243 In [1]: _ip.set_next_input("Hello World")
2246 2244 In [2]: Hello World_ # cursor is here
2247 2245 """
2248 2246 self.rl_next_input = s
2249 2247
2250 2248 def _indent_current_str(self):
2251 2249 """return the current level of indentation as a string"""
2252 2250 return self.input_splitter.get_indent_spaces() * ' '
2253 2251
2254 2252 #-------------------------------------------------------------------------
2255 2253 # Things related to text completion
2256 2254 #-------------------------------------------------------------------------
2257 2255
2258 2256 def init_completer(self):
2259 2257 """Initialize the completion machinery.
2260 2258
2261 2259 This creates completion machinery that can be used by client code,
2262 2260 either interactively in-process (typically triggered by the readline
2263 2261 library), programmatically (such as in test suites) or out-of-process
2264 2262 (typically over the network by remote frontends).
2265 2263 """
2266 2264 from IPython.core.completer import IPCompleter
2267 2265 from IPython.core.completerlib import (
2268 2266 cd_completer,
2269 2267 magic_run_completer,
2270 2268 module_completer,
2271 2269 reset_completer,
2272 2270 )
2273 2271
2274 2272 self.Completer = IPCompleter(shell=self,
2275 2273 namespace=self.user_ns,
2276 2274 global_namespace=self.user_global_ns,
2277 2275 parent=self,
2278 2276 )
2279 2277 self.configurables.append(self.Completer)
2280 2278
2281 2279 # Add custom completers to the basic ones built into IPCompleter
2282 2280 sdisp = self.strdispatchers.get('complete_command', StrDispatch())
2283 2281 self.strdispatchers['complete_command'] = sdisp
2284 2282 self.Completer.custom_completers = sdisp
2285 2283
2286 2284 self.set_hook('complete_command', module_completer, str_key = 'import')
2287 2285 self.set_hook('complete_command', module_completer, str_key = 'from')
2288 2286 self.set_hook('complete_command', module_completer, str_key = '%aimport')
2289 2287 self.set_hook('complete_command', magic_run_completer, str_key = '%run')
2290 2288 self.set_hook('complete_command', cd_completer, str_key = '%cd')
2291 2289 self.set_hook('complete_command', reset_completer, str_key = '%reset')
2292 2290
2293 2291 @skip_doctest
2294 2292 def complete(self, text, line=None, cursor_pos=None):
2295 2293 """Return the completed text and a list of completions.
2296 2294
2297 2295 Parameters
2298 2296 ----------
2299 2297 text : string
2300 2298 A string of text to be completed on. It can be given as empty, in
2301 2299 which case a line/position pair should be given instead. In this case, the
2302 2300 completer itself will split the line like readline does.
2303 2301 line : string, optional
2304 2302 The complete line that text is part of.
2305 2303 cursor_pos : int, optional
2306 2304 The position of the cursor on the input line.
2307 2305
2308 2306 Returns
2309 2307 -------
2310 2308 text : string
2311 2309 The actual text that was completed.
2312 2310 matches : list
2313 2311 A sorted list with all possible completions.
2314 2312
2315 2313 Notes
2316 2314 -----
2317 2315 The optional arguments allow the completion to take more context into
2318 2316 account, and are part of the low-level completion API.
2319 2317
2320 2318 This is a wrapper around the completion mechanism, similar to what
2321 2319 readline does at the command line when the TAB key is hit. By
2322 2320 exposing it as a method, it can be used by other non-readline
2323 2321 environments (such as GUIs) for text completion.
2324 2322
2325 2323 Examples
2326 2324 --------
2327 2325 In [1]: x = 'hello'
2328 2326
2329 2327 In [2]: _ip.complete('x.l')
2330 2328 Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
2331 2329 """
2332 2330
2333 2331 # Inject names into __builtin__ so we can complete on the added names.
2334 2332 with self.builtin_trap:
2335 2333 return self.Completer.complete(text, line, cursor_pos)
2336 2334
2337 2335 def set_custom_completer(self, completer, pos=0) -> None:
2338 2336 """Adds a new custom completer function.
2339 2337
2340 2338 The position argument (defaults to 0) is the index in the completers
2341 2339 list where you want the completer to be inserted.
2342 2340
2343 2341 `completer` should have the following signature::
2344 2342
2345 2343 def completion(self: Completer, text: str) -> List[str]:
2346 2344 raise NotImplementedError
2347 2345
2348 2346 It will be bound to the current Completer instance, be passed some text,
2349 2347 and should return a list of completions to suggest to the user.
2350 2348 """
2351 2349
2352 2350 newcomp = types.MethodType(completer, self.Completer)
2353 2351 self.Completer.custom_matchers.insert(pos,newcomp)
2354 2352
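# Editor's illustrative sketch, following the signature documented above:
#
#     def fruit_completer(self, text):
#         return [w for w in ("apple", "apricot", "banana") if w.startswith(text)]
#
#     get_ipython().set_custom_completer(fruit_completer)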
2355 2353 def set_completer_frame(self, frame=None):
2356 2354 """Set the frame of the completer."""
2357 2355 if frame:
2358 2356 self.Completer.namespace = frame.f_locals
2359 2357 self.Completer.global_namespace = frame.f_globals
2360 2358 else:
2361 2359 self.Completer.namespace = self.user_ns
2362 2360 self.Completer.global_namespace = self.user_global_ns
2363 2361
2364 2362 #-------------------------------------------------------------------------
2365 2363 # Things related to magics
2366 2364 #-------------------------------------------------------------------------
2367 2365
2368 2366 def init_magics(self):
2369 2367 from IPython.core import magics as m
2370 2368 self.magics_manager = magic.MagicsManager(shell=self,
2371 2369 parent=self,
2372 2370 user_magics=m.UserMagics(self))
2373 2371 self.configurables.append(self.magics_manager)
2374 2372
2375 2373 # Expose as public API from the magics manager
2376 2374 self.register_magics = self.magics_manager.register
2377 2375
2378 2376 self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
2379 2377 m.ConfigMagics, m.DisplayMagics, m.ExecutionMagics,
2380 2378 m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
2381 2379 m.NamespaceMagics, m.OSMagics, m.PackagingMagics,
2382 2380 m.PylabMagics, m.ScriptMagics,
2383 2381 )
2384 2382 self.register_magics(m.AsyncMagics)
2385 2383
2386 2384 # Register Magic Aliases
2387 2385 mman = self.magics_manager
2388 2386 # FIXME: magic aliases should be defined by the Magics classes
2389 2387 # or in MagicsManager, not here
2390 2388 mman.register_alias('ed', 'edit')
2391 2389 mman.register_alias('hist', 'history')
2392 2390 mman.register_alias('rep', 'recall')
2393 2391 mman.register_alias('SVG', 'svg', 'cell')
2394 2392 mman.register_alias('HTML', 'html', 'cell')
2395 2393 mman.register_alias('file', 'writefile', 'cell')
2396 2394
2397 2395 # FIXME: Move the color initialization to the DisplayHook, which
2398 2396 # should be split into a prompt manager and displayhook. We probably
2399 2397 # even need a centralized colors management object.
2400 2398 self.run_line_magic('colors', self.colors)
2401 2399
2402 2400 # Defined here so that it's included in the documentation
2403 2401 @functools.wraps(magic.MagicsManager.register_function)
2404 2402 def register_magic_function(self, func, magic_kind='line', magic_name=None):
2405 2403 self.magics_manager.register_function(
2406 2404 func, magic_kind=magic_kind, magic_name=magic_name
2407 2405 )
2408 2406
2409 2407 def _find_with_lazy_load(self, /, type_, magic_name: str):
2410 2408 """
2411 2409 Try to find a magic potentially lazy-loading it.
2412 2410
2413 2411 Parameters
2414 2412 ----------
2415 2413
2416 2414 type_: "line"|"cell"
2417 2415 the type of magics we are trying to find/lazy load.
2418 2416 magic_name: str
2419 2417 The name of the magic we are trying to find/lazy load
2420 2418
2421 2419
2422 2420 Note that this may have side effects, since finding the magic can lazily load its extension.
2423 2421 """
2424 2422 finder = {"line": self.find_line_magic, "cell": self.find_cell_magic}[type_]
2425 2423 fn = finder(magic_name)
2426 2424 if fn is not None:
2427 2425 return fn
2428 2426 lazy = self.magics_manager.lazy_magics.get(magic_name)
2429 2427 if lazy is None:
2430 2428 return None
2431 2429
2432 2430 self.run_line_magic("load_ext", lazy)
2433 2431 res = finder(magic_name)
2434 2432 return res
2435 2433
2436 2434 def run_line_magic(self, magic_name: str, line: str, _stack_depth=1):
2437 2435 """Execute the given line magic.
2438 2436
2439 2437 Parameters
2440 2438 ----------
2441 2439 magic_name : str
2442 2440 Name of the desired magic function, without '%' prefix.
2443 2441 line : str
2444 2442 The rest of the input line as a single string.
2445 2443 _stack_depth : int
2446 2444 If run_line_magic() is called from magic() then _stack_depth=2.
2447 2445 This is added to ensure backward compatibility for use of 'get_ipython().magic()'
2448 2446 """
2449 2447 fn = self._find_with_lazy_load("line", magic_name)
2450 2448 if fn is None:
2451 2449 lazy = self.magics_manager.lazy_magics.get(magic_name)
2452 2450 if lazy:
2453 2451 self.run_line_magic("load_ext", lazy)
2454 2452 fn = self.find_line_magic(magic_name)
2455 2453 if fn is None:
2456 2454 cm = self.find_cell_magic(magic_name)
2457 2455 etpl = "Line magic function `%%%s` not found%s."
2458 2456 extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
2459 2457 'did you mean that instead?)' % magic_name )
2460 2458 raise UsageError(etpl % (magic_name, extra))
2461 2459 else:
2462 2460 # Note: this is the distance in the stack to the user's frame.
2463 2461 # This will need to be updated if the internal calling logic gets
2464 2462 # refactored, or else we'll be expanding the wrong variables.
2465 2463
2466 2464 # Determine stack_depth depending on where run_line_magic() has been called
2467 2465 stack_depth = _stack_depth
2468 2466 if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
2469 2467 # magic has opted out of var_expand
2470 2468 magic_arg_s = line
2471 2469 else:
2472 2470 magic_arg_s = self.var_expand(line, stack_depth)
2473 2471 # Put magic args in a list so we can call with f(*a) syntax
2474 2472 args = [magic_arg_s]
2475 2473 kwargs = {}
2476 2474 # Grab local namespace if we need it:
2477 2475 if getattr(fn, "needs_local_scope", False):
2478 2476 kwargs['local_ns'] = self.get_local_scope(stack_depth)
2479 2477 with self.builtin_trap:
2480 2478 result = fn(*args, **kwargs)
2481 2479
2482 2480 # The code below prevents the output from being displayed
2483 2481 # when using magics with decorator @output_can_be_silenced
2484 2482 # when the last Python token in the expression is a ';'.
2485 2483 if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False):
2486 2484 if DisplayHook.semicolon_at_end_of_expression(magic_arg_s):
2487 2485 return None
2488 2486
2489 2487 return result
2490 2488
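# Editor's illustrative sketch: call a line magic programmatically; the name
# is given without the leading '%'.
#
#     ip = get_ipython()
#     ip.run_line_magic("timeit", "sum(range(100))")
#     ip.run_line_magic("load_ext", "autoreload")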
2491 2489 def get_local_scope(self, stack_depth):
2492 2490 """Get local scope at given stack depth.
2493 2491
2494 2492 Parameters
2495 2493 ----------
2496 2494 stack_depth : int
2497 2495 Depth relative to calling frame
2498 2496 """
2499 2497 return sys._getframe(stack_depth + 1).f_locals
2500 2498
2501 2499 def run_cell_magic(self, magic_name, line, cell):
2502 2500 """Execute the given cell magic.
2503 2501
2504 2502 Parameters
2505 2503 ----------
2506 2504 magic_name : str
2507 2505 Name of the desired magic function, without '%' prefix.
2508 2506 line : str
2509 2507 The rest of the first input line as a single string.
2510 2508 cell : str
2511 2509 The body of the cell as a (possibly multiline) string.
2512 2510 """
2513 2511 fn = self._find_with_lazy_load("cell", magic_name)
2514 2512 if fn is None:
2515 2513 lm = self.find_line_magic(magic_name)
2516 2514 etpl = "Cell magic `%%{0}` not found{1}."
2517 2515 extra = '' if lm is None else (' (But line magic `%{0}` exists, '
2518 2516 'did you mean that instead?)'.format(magic_name))
2519 2517 raise UsageError(etpl.format(magic_name, extra))
2520 2518 elif cell == '':
2521 2519 message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
2522 2520 if self.find_line_magic(magic_name) is not None:
2523 2521 message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
2524 2522 raise UsageError(message)
2525 2523 else:
2526 2524 # Note: this is the distance in the stack to the user's frame.
2527 2525 # This will need to be updated if the internal calling logic gets
2528 2526 # refactored, or else we'll be expanding the wrong variables.
2529 2527 stack_depth = 2
2530 2528 if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
2531 2529 # magic has opted out of var_expand
2532 2530 magic_arg_s = line
2533 2531 else:
2534 2532 magic_arg_s = self.var_expand(line, stack_depth)
2535 2533 kwargs = {}
2536 2534 if getattr(fn, "needs_local_scope", False):
2537 2535 kwargs['local_ns'] = self.user_ns
2538 2536
2539 2537 with self.builtin_trap:
2540 2538 args = (magic_arg_s, cell)
2541 2539 result = fn(*args, **kwargs)
2542 2540
2543 2541 # The code below prevents the output from being displayed
2544 2542 # when using magics with decorator @output_can_be_silenced
2545 2543 # when the last Python token in the expression is a ';'.
2546 2544 if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False):
2547 2545 if DisplayHook.semicolon_at_end_of_expression(cell):
2548 2546 return None
2549 2547
2550 2548 return result
2551 2549
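# Editor's illustrative sketch: the arguments after the magic name go in
# `line`, the (non-empty) body goes in `cell`.
#
#     ip = get_ipython()
#     ip.run_cell_magic("writefile", "hello.txt", "Hello from a cell magic\n")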
2552 2550 def find_line_magic(self, magic_name):
2553 2551 """Find and return a line magic by name.
2554 2552
2555 2553 Returns None if the magic isn't found."""
2556 2554 return self.magics_manager.magics['line'].get(magic_name)
2557 2555
2558 2556 def find_cell_magic(self, magic_name):
2559 2557 """Find and return a cell magic by name.
2560 2558
2561 2559 Returns None if the magic isn't found."""
2562 2560 return self.magics_manager.magics['cell'].get(magic_name)
2563 2561
2564 2562 def find_magic(self, magic_name, magic_kind='line'):
2565 2563 """Find and return a magic of the given type by name.
2566 2564
2567 2565 Returns None if the magic isn't found."""
2568 2566 return self.magics_manager.magics[magic_kind].get(magic_name)
2569 2567
2570 2568 def magic(self, arg_s):
2571 2569 """
2572 2570 DEPRECATED
2573 2571
2574 2572 Deprecated since IPython 0.13 (warning added in
2575 2573 8.1), use run_line_magic(magic_name, parameter_s).
2576 2574
2577 2575 Call a magic function by name.
2578 2576
2579 2577 Input: a string containing the name of the magic function to call and
2580 2578 any additional arguments to be passed to the magic.
2581 2579
2582 2580 magic('name -opt foo bar') is equivalent to typing at the ipython
2583 2581 prompt:
2584 2582
2585 2583 In[1]: %name -opt foo bar
2586 2584
2587 2585 To call a magic without arguments, simply use magic('name').
2588 2586
2589 2587 This provides a proper Python function to call IPython's magics in any
2590 2588 valid Python code you can type at the interpreter, including loops and
2591 2589 compound statements.
2592 2590 """
2593 2591 warnings.warn(
2594 2592 "`magic(...)` is deprecated since IPython 0.13 (warning added in "
2595 2593 "8.1), use run_line_magic(magic_name, parameter_s).",
2596 2594 DeprecationWarning,
2597 2595 stacklevel=2,
2598 2596 )
2599 2597 # TODO: should we issue a loud deprecation warning here?
2600 2598 magic_name, _, magic_arg_s = arg_s.partition(' ')
2601 2599 magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
2602 2600 return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2)
2603 2601
2604 2602 #-------------------------------------------------------------------------
2605 2603 # Things related to macros
2606 2604 #-------------------------------------------------------------------------
2607 2605
2608 2606 def define_macro(self, name, themacro):
2609 2607 """Define a new macro
2610 2608
2611 2609 Parameters
2612 2610 ----------
2613 2611 name : str
2614 2612 The name of the macro.
2615 2613 themacro : str or Macro
2616 2614 The action to do upon invoking the macro. If a string, a new
2617 2615 Macro object is created by passing the string to it.
2618 2616 """
2619 2617
2620 2618 from IPython.core import macro
2621 2619
2622 2620 if isinstance(themacro, str):
2623 2621 themacro = macro.Macro(themacro)
2624 2622 if not isinstance(themacro, macro.Macro):
2625 2623 raise ValueError('A macro must be a string or a Macro instance.')
2626 2624 self.user_ns[name] = themacro
2627 2625
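# Editor's illustrative sketch: a string is wrapped in a Macro object and
# stored in the user namespace, so typing its name replays the lines.
#
#     ip = get_ipython()
#     ip.define_macro("greet", "print('hi')\nprint('there')\n")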
2628 2626 #-------------------------------------------------------------------------
2629 2627 # Things related to the running of system commands
2630 2628 #-------------------------------------------------------------------------
2631 2629
2632 2630 def system_piped(self, cmd):
2633 2631 """Call the given cmd in a subprocess, piping stdout/err
2634 2632
2635 2633 Parameters
2636 2634 ----------
2637 2635 cmd : str
2638 2636 Command to execute (cannot end in '&', as background processes are
2639 2637 not supported). Should not be a command that expects input
2640 2638 other than simple text.
2641 2639 """
2642 2640 if cmd.rstrip().endswith('&'):
2643 2641 # this is *far* from a rigorous test
2644 2642 # We do not support backgrounding processes because we either use
2645 2643 # pexpect or pipes to read from. Users can always just call
2646 2644 # os.system() or use ip.system=ip.system_raw
2647 2645 # if they really want a background process.
2648 2646 raise OSError("Background processes not supported.")
2649 2647
2650 2648 # we explicitly do NOT return the subprocess status code, because
2651 2649 # a non-None value would trigger :func:`sys.displayhook` calls.
2652 2650 # Instead, we store the exit_code in user_ns.
2653 2651 self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
2654 2652
2655 2653 def system_raw(self, cmd):
2656 2654 """Call the given cmd in a subprocess using os.system on Windows or
2657 2655 subprocess.call using the system shell on other platforms.
2658 2656
2659 2657 Parameters
2660 2658 ----------
2661 2659 cmd : str
2662 2660 Command to execute.
2663 2661 """
2664 2662 cmd = self.var_expand(cmd, depth=1)
2665 2663 # warn if there is an IPython magic alternative.
2666 2664 if cmd == "":
2667 2665 main_cmd = ""
2668 2666 else:
2669 2667 main_cmd = cmd.split()[0]
2670 2668 has_magic_alternatives = ("pip", "conda", "cd")
2671 2669
2672 2670 if main_cmd in has_magic_alternatives:
2673 2671 warnings.warn(
2674 2672 (
2675 2673 "You executed the system command !{0} which may not work "
2676 2674 "as expected. Try the IPython magic %{0} instead."
2677 2675 ).format(main_cmd)
2678 2676 )
2679 2677
2680 2678 # protect os.system from UNC paths on Windows, which it can't handle:
2681 2679 if sys.platform == 'win32':
2682 2680 from IPython.utils._process_win32 import AvoidUNCPath
2683 2681 with AvoidUNCPath() as path:
2684 2682 if path is not None:
2685 2683 cmd = '"pushd %s &&"%s' % (path, cmd)
2686 2684 try:
2687 2685 ec = os.system(cmd)
2688 2686 except KeyboardInterrupt:
2689 2687 print('\n' + self.get_exception_only(), file=sys.stderr)
2690 2688 ec = -2
2691 2689 else:
2692 2690 # For posix the result of the subprocess.call() below is an exit
2693 2691 # code, which by convention is zero for success, positive for
2694 2692 # program failure. Exit codes above 128 are reserved for signals,
2695 2693 # and the formula for converting a signal to an exit code is usually
2696 2694 # signal_number+128. To more easily differentiate between exit
2697 2695 # codes and signals, ipython uses negative numbers. For instance
2698 2696 # since control-c is signal 2 but exit code 130, ipython's
2699 2697 # _exit_code variable will read -2. Note that some shells like
2700 2698 # csh and fish don't follow sh/bash conventions for exit codes.
2701 2699 executable = os.environ.get('SHELL', None)
2702 2700 try:
2703 2701 # Use env shell instead of default /bin/sh
2704 2702 ec = subprocess.call(cmd, shell=True, executable=executable)
2705 2703 except KeyboardInterrupt:
2706 2704 # intercept control-C; a long traceback is not useful here
2707 2705 print('\n' + self.get_exception_only(), file=sys.stderr)
2708 2706 ec = 130
2709 2707 if ec > 128:
2710 2708 ec = -(ec - 128)
2711 2709
2712 2710 # We explicitly do NOT return the subprocess status code, because
2713 2711 # a non-None value would trigger :func:`sys.displayhook` calls.
2714 2712 # Instead, we store the exit_code in user_ns. Note the semantics
2715 2713 # of _exit_code: for control-c, _exit_code == -signal.SIGINT,
2716 2714 # but raising SystemExit(_exit_code) will give status 254!
2717 2715 self.user_ns['_exit_code'] = ec
2718 2716
2719 2717 # use piped system by default, because it is better behaved
2720 2718 system = system_piped
2721 2719
2722 2720 def getoutput(self, cmd, split=True, depth=0):
2723 2721 """Get output (possibly including stderr) from a subprocess.
2724 2722
2725 2723 Parameters
2726 2724 ----------
2727 2725 cmd : str
2728 2726 Command to execute (cannot end in '&', as background processes are
2729 2727 not supported).
2730 2728 split : bool, optional
2731 2729 If True, split the output into an IPython SList. Otherwise, an
2732 2730 IPython LSString is returned. These are objects similar to normal
2733 2731 lists and strings, with a few convenience attributes for easier
2734 2732 manipulation of line-based output. You can use '?' on them for
2735 2733 details.
2736 2734 depth : int, optional
2737 2735 How many frames above the caller are the local variables which should
2738 2736 be expanded in the command string? The default (0) assumes that the
2739 2737 expansion variables are in the stack frame calling this function.
2740 2738 """
2741 2739 if cmd.rstrip().endswith('&'):
2742 2740 # this is *far* from a rigorous test
2743 2741 raise OSError("Background processes not supported.")
2744 2742 out = getoutput(self.var_expand(cmd, depth=depth+1))
2745 2743 if split:
2746 2744 out = SList(out.splitlines())
2747 2745 else:
2748 2746 out = LSString(out)
2749 2747 return out
2750 2748
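# Editor's illustrative sketch: capture command output as an SList
# (split=True, the default) or as a single LSString.
#
#     ip = get_ipython()
#     lines = ip.getoutput("ls")              # SList, one entry per line
#     text = ip.getoutput("ls", split=False)  # LSString, the raw text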
2751 2749 #-------------------------------------------------------------------------
2752 2750 # Things related to aliases
2753 2751 #-------------------------------------------------------------------------
2754 2752
2755 2753 def init_alias(self):
2756 2754 self.alias_manager = AliasManager(shell=self, parent=self)
2757 2755 self.configurables.append(self.alias_manager)
2758 2756
2759 2757 #-------------------------------------------------------------------------
2760 2758 # Things related to extensions
2761 2759 #-------------------------------------------------------------------------
2762 2760
2763 2761 def init_extension_manager(self):
2764 2762 self.extension_manager = ExtensionManager(shell=self, parent=self)
2765 2763 self.configurables.append(self.extension_manager)
2766 2764
2767 2765 #-------------------------------------------------------------------------
2768 2766 # Things related to payloads
2769 2767 #-------------------------------------------------------------------------
2770 2768
2771 2769 def init_payload(self):
2772 2770 self.payload_manager = PayloadManager(parent=self)
2773 2771 self.configurables.append(self.payload_manager)
2774 2772
2775 2773 #-------------------------------------------------------------------------
2776 2774 # Things related to the prefilter
2777 2775 #-------------------------------------------------------------------------
2778 2776
2779 2777 def init_prefilter(self):
2780 2778 self.prefilter_manager = PrefilterManager(shell=self, parent=self)
2781 2779 self.configurables.append(self.prefilter_manager)
2782 2780 # Ultimately this will be refactored in the new interpreter code, but
2783 2781 # for now, we should expose the main prefilter method (there's legacy
2784 2782 # code out there that may rely on this).
2785 2783 self.prefilter = self.prefilter_manager.prefilter_lines
2786 2784
2787 2785 def auto_rewrite_input(self, cmd):
2788 2786 """Print to the screen the rewritten form of the user's command.
2789 2787
2790 2788 This shows visual feedback by rewriting input lines that cause
2791 2789 automatic calling to kick in, like::
2792 2790
2793 2791 /f x
2794 2792
2795 2793 into::
2796 2794
2797 2795 ------> f(x)
2798 2796
2799 2797 after the user's input prompt. This helps the user understand that the
2800 2798 input line was transformed automatically by IPython.
2801 2799 """
2802 2800 if not self.show_rewritten_input:
2803 2801 return
2804 2802
2805 2803 # This is overridden in TerminalInteractiveShell to use fancy prompts
2806 2804 print("------> " + cmd)
2807 2805
2808 2806 #-------------------------------------------------------------------------
2809 2807 # Things related to extracting values/expressions from kernel and user_ns
2810 2808 #-------------------------------------------------------------------------
2811 2809
2812 2810 def _user_obj_error(self):
2813 2811 """return simple exception dict
2814 2812
2815 2813 for use in user_expressions
2816 2814 """
2817 2815
2818 2816 etype, evalue, tb = self._get_exc_info()
2819 2817 stb = self.InteractiveTB.get_exception_only(etype, evalue)
2820 2818
2821 2819 exc_info = {
2822 2820 "status": "error",
2823 2821 "traceback": stb,
2824 2822 "ename": etype.__name__,
2825 2823 "evalue": py3compat.safe_unicode(evalue),
2826 2824 }
2827 2825
2828 2826 return exc_info
2829 2827
2830 2828 def _format_user_obj(self, obj):
2831 2829 """format a user object to display dict
2832 2830
2833 2831 for use in user_expressions
2834 2832 """
2835 2833
2836 2834 data, md = self.display_formatter.format(obj)
2837 2835 value = {
2838 2836 'status' : 'ok',
2839 2837 'data' : data,
2840 2838 'metadata' : md,
2841 2839 }
2842 2840 return value
2843 2841
2844 2842 def user_expressions(self, expressions):
2845 2843 """Evaluate a dict of expressions in the user's namespace.
2846 2844
2847 2845 Parameters
2848 2846 ----------
2849 2847 expressions : dict
2850 2848 A dict with string keys and string values. The expression values
2851 2849 should be valid Python expressions, each of which will be evaluated
2852 2850 in the user namespace.
2853 2851
2854 2852 Returns
2855 2853 -------
2856 2854 A dict, keyed like the input expressions dict, with the rich mime-typed
2857 2855 display_data of each value.
2858 2856 """
2859 2857 out = {}
2860 2858 user_ns = self.user_ns
2861 2859 global_ns = self.user_global_ns
2862 2860
2863 2861 for key, expr in expressions.items():
2864 2862 try:
2865 2863 value = self._format_user_obj(eval(expr, global_ns, user_ns))
2866 2864 except:
2867 2865 value = self._user_obj_error()
2868 2866 out[key] = value
2869 2867 return out
2870 2868
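    # Illustrative sketch (editor's addition): user_expressions maps each
    # expression to a display_data-style reply, e.g. with ``a = 1`` defined:
    #
    #     ip.user_expressions({'x': 'a + 1', 'y': 'undefined_name'})
    #     # -> {'x': {'status': 'ok', 'data': {...}, 'metadata': {...}},
    #     #     'y': {'status': 'error', 'ename': 'NameError', ...}}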
2871 2869 #-------------------------------------------------------------------------
2872 2870 # Things related to the running of code
2873 2871 #-------------------------------------------------------------------------
2874 2872
2875 2873 def ex(self, cmd):
2876 2874 """Execute a normal python statement in user namespace."""
2877 2875 with self.builtin_trap:
2878 2876 exec(cmd, self.user_global_ns, self.user_ns)
2879 2877
2880 2878 def ev(self, expr):
2881 2879 """Evaluate python expression expr in user namespace.
2882 2880
2883 2881 Returns the result of evaluation
2884 2882 """
2885 2883 with self.builtin_trap:
2886 2884 return eval(expr, self.user_global_ns, self.user_ns)
2887 2885
2888 2886 def safe_execfile(self, fname, *where, exit_ignore=False, raise_exceptions=False, shell_futures=False):
2889 2887 """A safe version of the builtin execfile().
2890 2888
2891 2889 This version will never throw an exception, but instead print
2892 2890 helpful error messages to the screen. This only works on pure
2893 2891 Python files with the .py extension.
2894 2892
2895 2893 Parameters
2896 2894 ----------
2897 2895 fname : string
2898 2896 The name of the file to be executed.
2899 2897 *where : tuple
2900 2898 One or two namespaces, passed to execfile() as (globals,locals).
2901 2899 If only one is given, it is passed as both.
2902 2900 exit_ignore : bool (False)
2903 2901 If True, then silence SystemExit for non-zero status (it is always
2904 2902 silenced for zero status, as it is so common).
2905 2903 raise_exceptions : bool (False)
2906 2904 If True raise exceptions everywhere. Meant for testing.
2907 2905 shell_futures : bool (False)
2908 2906 If True, the code will share future statements with the interactive
2909 2907 shell. It will both be affected by previous __future__ imports, and
2910 2908 any __future__ imports in the code will affect the shell. If False,
2911 2909 __future__ imports are not shared in either direction.
2912 2910
2913 2911 """
2914 2912 fname = Path(fname).expanduser().resolve()
2915 2913
2916 2914 # Make sure we can open the file
2917 2915 try:
2918 2916 with fname.open("rb"):
2919 2917 pass
2920 2918 except:
2921 2919 warn('Could not open file <%s> for safe execution.' % fname)
2922 2920 return
2923 2921
2924 2922 # Find things also in current directory. This is needed to mimic the
2925 2923 # behavior of running a script from the system command line, where
2926 2924 # Python inserts the script's directory into sys.path
2927 2925 dname = str(fname.parent)
2928 2926
2929 2927 with prepended_to_syspath(dname), self.builtin_trap:
2930 2928 try:
2931 2929 glob, loc = (where + (None, ))[:2]
2932 2930 py3compat.execfile(
2933 2931 fname, glob, loc,
2934 2932 self.compile if shell_futures else None)
2935 2933 except SystemExit as status:
2936 2934 # If the call was made with 0 or None exit status (sys.exit(0)
2937 2935 # or sys.exit() ), don't bother showing a traceback, as both of
2938 2936 # these are considered normal by the OS:
2939 2937 # > python -c'import sys;sys.exit(0)'; echo $?
2940 2938 # 0
2941 2939 # > python -c'import sys;sys.exit()'; echo $?
2942 2940 # 0
2943 2941 # For other exit status, we show the exception unless
2944 2942 # explicitly silenced, but only in short form.
2945 2943 if status.code:
2946 2944 if raise_exceptions:
2947 2945 raise
2948 2946 if not exit_ignore:
2949 2947 self.showtraceback(exception_only=True)
2950 2948 except:
2951 2949 if raise_exceptions:
2952 2950 raise
2953 2951 # tb offset is 2 because we wrap execfile
2954 2952 self.showtraceback(tb_offset=2)
2955 2953
2956 2954 def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
2957 2955 """Like safe_execfile, but for .ipy or .ipynb files with IPython syntax.
2958 2956
2959 2957 Parameters
2960 2958 ----------
2961 2959 fname : str
2962 2960 The name of the file to execute. The filename must have a
2963 2961 .ipy or .ipynb extension.
2964 2962 shell_futures : bool (False)
2965 2963 If True, the code will share future statements with the interactive
2966 2964 shell. It will both be affected by previous __future__ imports, and
2967 2965 any __future__ imports in the code will affect the shell. If False,
2968 2966 __future__ imports are not shared in either direction.
2969 2967 raise_exceptions : bool (False)
2970 2968 If True raise exceptions everywhere. Meant for testing.
2971 2969 """
2972 2970 fname = Path(fname).expanduser().resolve()
2973 2971
2974 2972 # Make sure we can open the file
2975 2973 try:
2976 2974 with fname.open("rb"):
2977 2975 pass
2978 2976 except:
2979 2977 warn('Could not open file <%s> for safe execution.' % fname)
2980 2978 return
2981 2979
2982 2980 # Find things also in current directory. This is needed to mimic the
2983 2981 # behavior of running a script from the system command line, where
2984 2982 # Python inserts the script's directory into sys.path
2985 2983 dname = str(fname.parent)
2986 2984
2987 2985 def get_cells():
2988 2986 """generator for sequence of code blocks to run"""
2989 2987 if fname.suffix == ".ipynb":
2990 2988 from nbformat import read
2991 2989 nb = read(fname, as_version=4)
2992 2990 if not nb.cells:
2993 2991 return
2994 2992 for cell in nb.cells:
2995 2993 if cell.cell_type == 'code':
2996 2994 yield cell.source
2997 2995 else:
2998 2996 yield fname.read_text(encoding="utf-8")
2999 2997
3000 2998 with prepended_to_syspath(dname):
3001 2999 try:
3002 3000 for cell in get_cells():
3003 3001 result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
3004 3002 if raise_exceptions:
3005 3003 result.raise_error()
3006 3004 elif not result.success:
3007 3005 break
3008 3006 except:
3009 3007 if raise_exceptions:
3010 3008 raise
3011 3009 self.showtraceback()
3012 3010 warn('Unknown failure executing file: <%s>' % fname)
3013 3011
3014 3012 def safe_run_module(self, mod_name, where):
3015 3013 """A safe version of runpy.run_module().
3016 3014
3017 3015 This version will never throw an exception, but instead print
3018 3016 helpful error messages to the screen.
3019 3017
3020 3018 `SystemExit` exceptions with status code 0 or None are ignored.
3021 3019
3022 3020 Parameters
3023 3021 ----------
3024 3022 mod_name : string
3025 3023 The name of the module to be executed.
3026 3024 where : dict
3027 3025 The globals namespace.
3028 3026 """
3029 3027 try:
3030 3028 try:
3031 3029 where.update(
3032 3030 runpy.run_module(str(mod_name), run_name="__main__",
3033 3031 alter_sys=True)
3034 3032 )
3035 3033 except SystemExit as status:
3036 3034 if status.code:
3037 3035 raise
3038 3036 except:
3039 3037 self.showtraceback()
3040 3038 warn('Unknown failure executing module: <%s>' % mod_name)
3041 3039
3042 3040 def run_cell(
3043 3041 self,
3044 3042 raw_cell,
3045 3043 store_history=False,
3046 3044 silent=False,
3047 3045 shell_futures=True,
3048 3046 cell_id=None,
3049 3047 ):
3050 3048 """Run a complete IPython cell.
3051 3049
3052 3050 Parameters
3053 3051 ----------
3054 3052 raw_cell : str
3055 3053 The code (including IPython code such as %magic functions) to run.
3056 3054 store_history : bool
3057 3055 If True, the raw and translated cell will be stored in IPython's
3058 3056 history. For user code calling back into IPython's machinery, this
3059 3057 should be set to False.
3060 3058 silent : bool
3061 3059 If True, avoid side-effects, such as implicit displayhooks and
3062 3060 logging. silent=True forces store_history=False.
3063 3061 shell_futures : bool
3064 3062 If True, the code will share future statements with the interactive
3065 3063 shell. It will both be affected by previous __future__ imports, and
3066 3064 any __future__ imports in the code will affect the shell. If False,
3067 3065 __future__ imports are not shared in either direction.
3068 3066
3069 3067 Returns
3070 3068 -------
3071 3069 result : :class:`ExecutionResult`
3072 3070 """
3073 3071 result = None
3074 3072 try:
3075 3073 result = self._run_cell(
3076 3074 raw_cell, store_history, silent, shell_futures, cell_id
3077 3075 )
3078 3076 finally:
3079 3077 self.events.trigger('post_execute')
3080 3078 if not silent:
3081 3079 self.events.trigger('post_run_cell', result)
3082 3080 return result
3083 3081
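    # Illustrative sketch (editor's addition): the returned ExecutionResult
    # carries both the value and any error, e.g.:
    #
    #     res = ip.run_cell('1 + 1')
    #     res.success        # True
    #     res.result         # 2
    #     res.error_in_exec  # None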
3084 3082 def _run_cell(
3085 3083 self,
3086 3084 raw_cell: str,
3087 3085 store_history: bool,
3088 3086 silent: bool,
3089 3087 shell_futures: bool,
3090 3088 cell_id: str,
3091 3089 ) -> ExecutionResult:
3092 3090 """Internal method to run a complete IPython cell."""
3093 3091
3094 3092 # we need to avoid calling self.transform_cell multiple times on the same thing
3095 3093 # so we need to store some results:
3096 3094 preprocessing_exc_tuple = None
3097 3095 try:
3098 3096 transformed_cell = self.transform_cell(raw_cell)
3099 3097 except Exception:
3100 3098 transformed_cell = raw_cell
3101 3099 preprocessing_exc_tuple = sys.exc_info()
3102 3100
3103 3101 assert transformed_cell is not None
3104 3102 coro = self.run_cell_async(
3105 3103 raw_cell,
3106 3104 store_history=store_history,
3107 3105 silent=silent,
3108 3106 shell_futures=shell_futures,
3109 3107 transformed_cell=transformed_cell,
3110 3108 preprocessing_exc_tuple=preprocessing_exc_tuple,
3111 3109 cell_id=cell_id,
3112 3110 )
3113 3111
3114 3112 # run_cell_async is async, but may not actually need an eventloop.
3115 3113 # When this is the case, we want to run it using the pseudo_sync_runner
3116 3114 # so that code can invoke eventloops (for example via the %run and
3117 3115 # `%paste` magics).
3118 3116 if self.trio_runner:
3119 3117 runner = self.trio_runner
3120 3118 elif self.should_run_async(
3121 3119 raw_cell,
3122 3120 transformed_cell=transformed_cell,
3123 3121 preprocessing_exc_tuple=preprocessing_exc_tuple,
3124 3122 ):
3125 3123 runner = self.loop_runner
3126 3124 else:
3127 3125 runner = _pseudo_sync_runner
3128 3126
3129 3127 try:
3130 3128 result = runner(coro)
3131 3129 except BaseException as e:
3132 3130 info = ExecutionInfo(
3133 3131 raw_cell, store_history, silent, shell_futures, cell_id
3134 3132 )
3135 3133 result = ExecutionResult(info)
3136 3134 result.error_in_exec = e
3137 3135 self.showtraceback(running_compiled_code=True)
3138 3136 finally:
3139 3137 return result
3140 3138
3141 3139 def should_run_async(
3142 3140 self, raw_cell: str, *, transformed_cell=None, preprocessing_exc_tuple=None
3143 3141 ) -> bool:
3144 3142 """Return whether a cell should be run asynchronously via a coroutine runner
3145 3143
3146 3144 Parameters
3147 3145 ----------
3148 3146 raw_cell : str
3149 3147 The code to be executed
3150 3148
3151 3149 Returns
3152 3150 -------
3153 3151 result: bool
3154 3152 Whether the code needs to be run with a coroutine runner or not
3155 3153 .. versionadded:: 7.0
3156 3154 """
3157 3155 if not self.autoawait:
3158 3156 return False
3159 3157 if preprocessing_exc_tuple is not None:
3160 3158 return False
3161 3159 assert preprocessing_exc_tuple is None
3162 3160 if transformed_cell is None:
3163 3161 warnings.warn(
3164 3162 "`should_run_async` will not call `transform_cell`"
3165 3163 " automatically in the future. Please pass the result to"
3166 3164 " the `transformed_cell` argument and any exception that happens"
3167 3165 " during the"
3168 3166 " transform in `preprocessing_exc_tuple` in"
3169 3167 " IPython 7.17 and above.",
3170 3168 DeprecationWarning,
3171 3169 stacklevel=2,
3172 3170 )
3173 3171 try:
3174 3172 cell = self.transform_cell(raw_cell)
3175 3173 except Exception:
3176 3174 # any exception during transform will be raised
3177 3175 # prior to execution
3178 3176 return False
3179 3177 else:
3180 3178 cell = transformed_cell
3181 3179 return _should_be_async(cell)
3182 3180
3183 3181 async def run_cell_async(
3184 3182 self,
3185 3183 raw_cell: str,
3186 3184 store_history=False,
3187 3185 silent=False,
3188 3186 shell_futures=True,
3189 3187 *,
3190 3188 transformed_cell: Optional[str] = None,
3191 3189 preprocessing_exc_tuple: Optional[AnyType] = None,
3192 3190 cell_id=None,
3193 3191 ) -> ExecutionResult:
3194 3192 """Run a complete IPython cell asynchronously.
3195 3193
3196 3194 Parameters
3197 3195 ----------
3198 3196 raw_cell : str
3199 3197 The code (including IPython code such as %magic functions) to run.
3200 3198 store_history : bool
3201 3199 If True, the raw and translated cell will be stored in IPython's
3202 3200 history. For user code calling back into IPython's machinery, this
3203 3201 should be set to False.
3204 3202 silent : bool
3205 3203 If True, avoid side-effects, such as implicit displayhooks and
3206 3204 logging. silent=True forces store_history=False.
3207 3205 shell_futures : bool
3208 3206 If True, the code will share future statements with the interactive
3209 3207 shell. It will both be affected by previous __future__ imports, and
3210 3208 any __future__ imports in the code will affect the shell. If False,
3211 3209 __future__ imports are not shared in either direction.
3212 3210 transformed_cell: str
3213 3211 cell that has already been passed through the input transformers
3214 3212 preprocessing_exc_tuple:
3215 3213 exception info tuple (as returned by sys.exc_info()) if the transformation failed.
3216 3214
3217 3215 Returns
3218 3216 -------
3219 3217 result : :class:`ExecutionResult`
3220 3218
3221 3219 .. versionadded:: 7.0
3222 3220 """
3223 3221 info = ExecutionInfo(raw_cell, store_history, silent, shell_futures, cell_id)
3224 3222 result = ExecutionResult(info)
3225 3223
3226 3224 if (not raw_cell) or raw_cell.isspace():
3227 3225 self.last_execution_succeeded = True
3228 3226 self.last_execution_result = result
3229 3227 return result
3230 3228
3231 3229 if silent:
3232 3230 store_history = False
3233 3231
3234 3232 if store_history:
3235 3233 result.execution_count = self.execution_count
3236 3234
3237 3235 def error_before_exec(value):
3238 3236 if store_history:
3239 3237 self.execution_count += 1
3240 3238 result.error_before_exec = value
3241 3239 self.last_execution_succeeded = False
3242 3240 self.last_execution_result = result
3243 3241 return result
3244 3242
3245 3243 self.events.trigger('pre_execute')
3246 3244 if not silent:
3247 3245 self.events.trigger('pre_run_cell', info)
3248 3246
3249 3247 if transformed_cell is None:
3250 3248 warnings.warn(
3251 3249 "`run_cell_async` will not call `transform_cell`"
3252 3250 " automatically in the future. Please pass the result to"
3253 3251 " the `transformed_cell` argument and any exception that happens"
3254 3252 " during the"
3255 3253 " transform in `preprocessing_exc_tuple` in"
3256 3254 " IPython 7.17 and above.",
3257 3255 DeprecationWarning,
3258 3256 stacklevel=2,
3259 3257 )
3260 3258 # If any of our input transformations (input_transformer_manager or
3261 3259 # prefilter_manager) raises an exception, we store it in this variable
3262 3260 # so that we can display the error after logging the input and storing
3263 3261 # it in the history.
3264 3262 try:
3265 3263 cell = self.transform_cell(raw_cell)
3266 3264 except Exception:
3267 3265 preprocessing_exc_tuple = sys.exc_info()
3268 3266 cell = raw_cell # cell has to exist so it can be stored/logged
3269 3267 else:
3270 3268 preprocessing_exc_tuple = None
3271 3269 else:
3272 3270 if preprocessing_exc_tuple is None:
3273 3271 cell = transformed_cell
3274 3272 else:
3275 3273 cell = raw_cell
3276 3274
3277 3275 # Do NOT store paste/cpaste magic history
3278 3276 if "get_ipython().run_line_magic(" in cell and "paste" in cell:
3279 3277 store_history = False
3280 3278
3281 3279 # Store raw and processed history
3282 3280 if store_history:
3283 3281 assert self.history_manager is not None
3284 3282 self.history_manager.store_inputs(self.execution_count, cell, raw_cell)
3285 3283 if not silent:
3286 3284 self.logger.log(cell, raw_cell)
3287 3285
3288 3286 # Display the exception if input processing failed.
3289 3287 if preprocessing_exc_tuple is not None:
3290 3288 self.showtraceback(preprocessing_exc_tuple)
3291 3289 if store_history:
3292 3290 self.execution_count += 1
3293 3291 return error_before_exec(preprocessing_exc_tuple[1])
3294 3292
3295 3293 # Our own compiler remembers the __future__ environment. If we want to
3296 3294 # run code with a separate __future__ environment, use the default
3297 3295 # compiler
3298 3296 compiler = self.compile if shell_futures else self.compiler_class()
3299 3297
3300 3298 with self.builtin_trap:
3301 3299 cell_name = compiler.cache(cell, self.execution_count, raw_code=raw_cell)
3302 3300
3303 3301 with self.display_trap:
3304 3302 # Compile to bytecode
3305 3303 try:
3306 3304 code_ast = compiler.ast_parse(cell, filename=cell_name)
3307 3305 except self.custom_exceptions as e:
3308 3306 etype, value, tb = sys.exc_info()
3309 3307 self.CustomTB(etype, value, tb)
3310 3308 return error_before_exec(e)
3311 3309 except IndentationError as e:
3312 3310 self.showindentationerror()
3313 3311 return error_before_exec(e)
3314 3312 except (OverflowError, SyntaxError, ValueError, TypeError,
3315 3313 MemoryError) as e:
3316 3314 self.showsyntaxerror()
3317 3315 return error_before_exec(e)
3318 3316
3319 3317 # Apply AST transformations
3320 3318 try:
3321 3319 code_ast = self.transform_ast(code_ast)
3322 3320 except InputRejected as e:
3323 3321 self.showtraceback()
3324 3322 return error_before_exec(e)
3325 3323
3326 3324 # Give the displayhook a reference to our ExecutionResult so it
3327 3325 # can fill in the output value.
3328 3326 self.displayhook.exec_result = result
3329 3327
3330 3328 # Execute the user code
3331 3329 interactivity = "none" if silent else self.ast_node_interactivity
3332 3330
3333 3331
3334 3332 has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
3335 3333 interactivity=interactivity, compiler=compiler, result=result)
3336 3334
3337 3335 self.last_execution_succeeded = not has_raised
3338 3336 self.last_execution_result = result
3339 3337
3340 3338 # Reset this so later displayed values do not modify the
3341 3339 # ExecutionResult
3342 3340 self.displayhook.exec_result = None
3343 3341
3344 3342 if store_history:
3345 3343 assert self.history_manager is not None
3346 3344 # Write output to the database. Does nothing unless
3347 3345 # history output logging is enabled.
3348 3346 self.history_manager.store_output(self.execution_count)
3349 3347 # Each cell is a *single* input, regardless of how many lines it has
3350 3348 self.execution_count += 1
3351 3349
3352 3350 return result
3353 3351
3354 3352 def transform_cell(self, raw_cell):
3355 3353 """Transform an input cell before parsing it.
3356 3354
3357 3355 Static transformations, implemented in IPython.core.inputtransformer2,
3358 3356 deal with things like ``%magic`` and ``!system`` commands.
3359 3357 These run on all input.
3360 3358 Dynamic transformations, for things like unescaped magics and the exit
3361 3359 autocall, depend on the state of the interpreter.
3362 3360 These only apply to single line inputs.
3363 3361
3364 3362 These string-based transformations are followed by AST transformations;
3365 3363 see :meth:`transform_ast`.
3366 3364 """
3367 3365 # Static input transformations
3368 3366 cell = self.input_transformer_manager.transform_cell(raw_cell)
3369 3367
3370 3368 if len(cell.splitlines()) == 1:
3371 3369 # Dynamic transformations - only applied for single line commands
3372 3370 with self.builtin_trap:
3373 3371 # use prefilter_lines to handle trailing newlines
3374 3372 # restore trailing newline for ast.parse
3375 3373 cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
3376 3374
3377 3375 lines = cell.splitlines(keepends=True)
3378 3376 for transform in self.input_transformers_post:
3379 3377 lines = transform(lines)
3380 3378 cell = ''.join(lines)
3381 3379
3382 3380 return cell
3383 3381
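    # Illustrative sketch (editor's addition): magics and shell escapes are
    # rewritten into plain Python calls, e.g.:
    #
    #     ip.transform_cell('%time 1 + 1')
    #     # -> "get_ipython().run_line_magic('time', '1 + 1')\n"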
3384 3382 def transform_ast(self, node):
3385 3383 """Apply the AST transformations from self.ast_transformers
3386 3384
3387 3385 Parameters
3388 3386 ----------
3389 3387 node : ast.Node
3390 3388 The root node to be transformed. Typically called with the ast.Module
3391 3389 produced by parsing user input.
3392 3390
3393 3391 Returns
3394 3392 -------
3395 3393 An ast.Node corresponding to the node it was called with. Note that it
3396 3394 may also modify the passed object, so don't rely on references to the
3397 3395 original AST.
3398 3396 """
3399 3397 for transformer in self.ast_transformers:
3400 3398 try:
3401 3399 node = transformer.visit(node)
3402 3400 except InputRejected:
3403 3401 # User-supplied AST transformers can reject an input by raising
3404 3402 # an InputRejected. Short-circuit in this case so that we
3405 3403 # don't unregister the transform.
3406 3404 raise
3407 3405 except Exception as e:
3408 3406 warn(
3409 3407 "AST transformer %r threw an error. It will be unregistered. %s"
3410 3408 % (transformer, e)
3411 3409 )
3412 3410 self.ast_transformers.remove(transformer)
3413 3411
3414 3412 if self.ast_transformers:
3415 3413 ast.fix_missing_locations(node)
3416 3414 return node
3417 3415
3418 3416 async def run_ast_nodes(
3419 3417 self,
3420 3418 nodelist: ListType[stmt],
3421 3419 cell_name: str,
3422 3420 interactivity="last_expr",
3423 3421 compiler=compile,
3424 3422 result=None,
3425 3423 ):
3426 3424 """Run a sequence of AST nodes. The execution mode depends on the
3427 3425 interactivity parameter.
3428 3426
3429 3427 Parameters
3430 3428 ----------
3431 3429 nodelist : list
3432 3430 A sequence of AST nodes to run.
3433 3431 cell_name : str
3434 3432 Will be passed to the compiler as the filename of the cell. Typically
3435 3433 the value returned by ip.compile.cache(cell).
3436 3434 interactivity : str
3437 3435 'all', 'last', 'last_expr', 'last_expr_or_assign' or 'none',
3438 3436 specifying which nodes should be run interactively (displaying output
3439 3437 from expressions). 'last_expr' will run the last node interactively
3440 3438 only if it is an expression (i.e. expressions in loops or other blocks
3441 3439 are not displayed). 'last_expr_or_assign' will run the last expression
3442 3440 or the last assignment. Other values for this parameter will raise a
3443 3441 ValueError.
3444 3442
3445 3443 compiler : callable
3446 3444 A function with the same interface as the built-in compile(), to turn
3447 3445 the AST nodes into code objects. Default is the built-in compile().
3448 3446 result : ExecutionResult, optional
3449 3447 An object to store exceptions that occur during execution.
3450 3448
3451 3449 Returns
3452 3450 -------
3453 3451 True if an exception occurred while running code, False if it finished
3454 3452 running.
3455 3453 """
3456 3454 if not nodelist:
3457 3455 return
3458 3456
3459 3457
3460 3458 if interactivity == 'last_expr_or_assign':
3461 3459 if isinstance(nodelist[-1], _assign_nodes):
3462 3460 asg = nodelist[-1]
3463 3461 if isinstance(asg, ast.Assign) and len(asg.targets) == 1:
3464 3462 target = asg.targets[0]
3465 3463 elif isinstance(asg, _single_targets_nodes):
3466 3464 target = asg.target
3467 3465 else:
3468 3466 target = None
3469 3467 if isinstance(target, ast.Name):
3470 3468 nnode = ast.Expr(ast.Name(target.id, ast.Load()))
3471 3469 ast.fix_missing_locations(nnode)
3472 3470 nodelist.append(nnode)
3473 3471 interactivity = 'last_expr'
3474 3472
3475 3473 _async = False
3476 3474 if interactivity == 'last_expr':
3477 3475 if isinstance(nodelist[-1], ast.Expr):
3478 3476 interactivity = "last"
3479 3477 else:
3480 3478 interactivity = "none"
3481 3479
3482 3480 if interactivity == 'none':
3483 3481 to_run_exec, to_run_interactive = nodelist, []
3484 3482 elif interactivity == 'last':
3485 3483 to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
3486 3484 elif interactivity == 'all':
3487 3485 to_run_exec, to_run_interactive = [], nodelist
3488 3486 else:
3489 3487 raise ValueError("Interactivity was %r" % interactivity)
3490 3488
3491 3489 try:
3492 3490
3493 3491 def compare(code):
3494 3492 is_async = inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE
3495 3493 return is_async
3496 3494
3497 3495 # refactor that to just change the mod constructor.
3498 3496 to_run = []
3499 3497 for node in to_run_exec:
3500 3498 to_run.append((node, "exec"))
3501 3499
3502 3500 for node in to_run_interactive:
3503 3501 to_run.append((node, "single"))
3504 3502
3505 3503 for node, mode in to_run:
3506 3504 if mode == "exec":
3507 3505 mod = Module([node], [])
3508 3506 elif mode == "single":
3509 3507 mod = ast.Interactive([node]) # type: ignore
3510 3508 with compiler.extra_flags(
3511 3509 getattr(ast, "PyCF_ALLOW_TOP_LEVEL_AWAIT", 0x0)
3512 3510 if self.autoawait
3513 3511 else 0x0
3514 3512 ):
3515 3513 code = compiler(mod, cell_name, mode)
3516 3514 asy = compare(code)
3517 3515 if await self.run_code(code, result, async_=asy):
3518 3516 return True
3519 3517
3520 3518 # Flush softspace
3521 3519 if softspace(sys.stdout, 0):
3522 3520 print()
3523 3521
3524 3522 except:
3525 3523 # It's possible to have exceptions raised here, typically by
3526 3524 # compilation of odd code (such as a naked 'return' outside a
3527 3525 # function) that did parse but isn't valid. Typically the exception
3528 3526 # is a SyntaxError, but it's safest just to catch anything and show
3529 3527 # the user a traceback.
3530 3528
3531 3529 # We do only one try/except outside the loop to minimize the impact
3532 3530 # on runtime, and also because if any node in the node list is
3533 3531 # broken, we should stop execution completely.
3534 3532 if result:
3535 3533 result.error_before_exec = sys.exc_info()[1]
3536 3534 self.showtraceback()
3537 3535 return True
3538 3536
3539 3537 return False
3540 3538
3541 3539 async def run_code(self, code_obj, result=None, *, async_=False):
3542 3540 """Execute a code object.
3543 3541
3544 3542 When an exception occurs, self.showtraceback() is called to display a
3545 3543 traceback.
3546 3544
3547 3545 Parameters
3548 3546 ----------
3549 3547 code_obj : code object
3550 3548 A compiled code object, to be executed
3551 3549 result : ExecutionResult, optional
3552 3550 An object to store exceptions that occur during execution.
3553 3551 async_ : Bool (Experimental)
3554 3552 Attempt to run top-level asynchronous code in a default loop.
3555 3553
3556 3554 Returns
3557 3555 -------
3558 3556 False : successful execution.
3559 3557 True : an error occurred.
3560 3558 """
3561 3559 # special value to say that anything above is IPython and should be
3562 3560 # hidden.
3563 3561 __tracebackhide__ = "__ipython_bottom__"
3564 3562 # Set our own excepthook in case the user code tries to call it
3565 3563 # directly, so that the IPython crash handler doesn't get triggered
3566 3564 old_excepthook, sys.excepthook = sys.excepthook, self.excepthook
3567 3565
3568 3566 # we save the original sys.excepthook in the instance, in case config
3569 3567 # code (such as magics) needs access to it.
3570 3568 self.sys_excepthook = old_excepthook
3571 3569 outflag = True # happens in more places, so it's easier as default
3572 3570 try:
3573 3571 try:
3574 3572 if async_:
3575 3573 await eval(code_obj, self.user_global_ns, self.user_ns)
3576 3574 else:
3577 3575 exec(code_obj, self.user_global_ns, self.user_ns)
3578 3576 finally:
3579 3577 # Reset our crash handler in place
3580 3578 sys.excepthook = old_excepthook
3581 3579 except SystemExit as e:
3582 3580 if result is not None:
3583 3581 result.error_in_exec = e
3584 3582 self.showtraceback(exception_only=True)
3585 3583 warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
3586 3584 except bdb.BdbQuit:
3587 3585 etype, value, tb = sys.exc_info()
3588 3586 if result is not None:
3589 3587 result.error_in_exec = value
3590 3588 # the BdbQuit stops here
3591 3589 except self.custom_exceptions:
3592 3590 etype, value, tb = sys.exc_info()
3593 3591 if result is not None:
3594 3592 result.error_in_exec = value
3595 3593 self.CustomTB(etype, value, tb)
3596 3594 except:
3597 3595 if result is not None:
3598 3596 result.error_in_exec = sys.exc_info()[1]
3599 3597 self.showtraceback(running_compiled_code=True)
3600 3598 else:
3601 3599 outflag = False
3602 3600 return outflag
3603 3601
3604 3602 # For backwards compatibility
3605 3603 runcode = run_code
3606 3604
3607 3605 def check_complete(self, code: str) -> Tuple[str, str]:
3608 3606 """Return whether a block of code is ready to execute, or should be continued
3609 3607
3610 3608 Parameters
3611 3609 ----------
3612 3610 code : string
3613 3611 Python input code, which can be multiline.
3614 3612
3615 3613 Returns
3616 3614 -------
3617 3615 status : str
3618 3616 One of 'complete', 'incomplete', or 'invalid' if source is not a
3619 3617 prefix of valid code.
3620 3618 indent : str
3621 3619 When status is 'incomplete', this is some whitespace to insert on
3622 3620 the next line of the prompt.
3623 3621 """
3624 3622 status, nspaces = self.input_transformer_manager.check_complete(code)
3625 3623 return status, ' ' * (nspaces or 0)
3626 3624
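    # Illustrative sketch (editor's addition); assumes a shell instance ``ip``:
    #
    #     ip.check_complete('a = 1')                 # -> ('complete', '')
    #     ip.check_complete('for i in range(3):')    # -> ('incomplete', '    ')
    #     ip.check_complete('1)')                    # -> ('invalid', '')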
3627 3625 #-------------------------------------------------------------------------
3628 3626 # Things related to GUI support and pylab
3629 3627 #-------------------------------------------------------------------------
3630 3628
3631 3629 active_eventloop: Optional[str] = None
3632 3630
3633 3631 def enable_gui(self, gui=None):
3634 3632 raise NotImplementedError('Implement enable_gui in a subclass')
3635 3633
3636 3634 def enable_matplotlib(self, gui=None):
3637 3635 """Enable interactive matplotlib and inline figure support.
3638 3636
3639 3637 This takes the following steps:
3640 3638
3641 3639 1. select the appropriate eventloop and matplotlib backend
3642 3640 2. set up matplotlib for interactive use with that backend
3643 3641 3. configure formatters for inline figure display
3644 3642 4. enable the selected gui eventloop
3645 3643
3646 3644 Parameters
3647 3645 ----------
3648 3646 gui : optional, string
3649 3647 If given, dictates the choice of matplotlib GUI backend to use
3650 3648 (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
3651 3649 'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
3652 3650 matplotlib (as dictated by the matplotlib build-time options plus the
3653 3651 user's matplotlibrc configuration file). Note that not all backends
3654 3652 make sense in all contexts, for example a terminal ipython can't
3655 3653 display figures inline.
3656 3654 """
3657 3655 from .pylabtools import _matplotlib_manages_backends
3658 3656
3659 3657 if not _matplotlib_manages_backends() and gui in (None, "auto"):
3660 3658 # Early import of backend_inline required for its side effect of
3661 3659 # calling _enable_matplotlib_integration()
3662 3660 import matplotlib_inline.backend_inline
3663 3661
3664 3662 from IPython.core import pylabtools as pt
3665 3663 gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
3666 3664
3667 3665 if gui is not None:
3668 3666 # If we have our first gui selection, store it
3669 3667 if self.pylab_gui_select is None:
3670 3668 self.pylab_gui_select = gui
3671 3669 # Otherwise if they are different
3672 3670 elif gui != self.pylab_gui_select:
3673 3671 print('Warning: Cannot change to a different GUI toolkit: %s.'
3674 3672 ' Using %s instead.' % (gui, self.pylab_gui_select))
3675 3673 gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)
3676 3674
3677 3675 pt.activate_matplotlib(backend)
3678 3676
3679 3677 from matplotlib_inline.backend_inline import configure_inline_support
3680 3678
3681 3679 configure_inline_support(self, backend)
3682 3680
3683 3681 # Now we must activate the gui pylab wants to use, and fix %run to take
3684 3682 # plot updates into account
3685 3683 self.enable_gui(gui)
3686 3684 self.magics_manager.registry['ExecutionMagics'].default_runner = \
3687 3685 pt.mpl_runner(self.safe_execfile)
3688 3686
3689 3687 return gui, backend
3690 3688
3691 3689 def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
3692 3690 """Activate pylab support at runtime.
3693 3691
3694 3692 This turns on support for matplotlib, preloads into the interactive
3695 3693 namespace all of numpy and pylab, and configures IPython to correctly
3696 3694 interact with the GUI event loop. The GUI backend to be used can be
3697 3695 optionally selected with the optional ``gui`` argument.
3698 3696
3699 3697 Compared to InteractiveShell.enable_matplotlib, this method only adds the preloading of the namespace.
3700 3698
3701 3699 Parameters
3702 3700 ----------
3703 3701 gui : optional, string
3704 3702 If given, dictates the choice of matplotlib GUI backend to use
3705 3703 (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
3706 3704 'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
3707 3705 matplotlib (as dictated by the matplotlib build-time options plus the
3708 3706 user's matplotlibrc configuration file). Note that not all backends
3709 3707 make sense in all contexts, for example a terminal ipython can't
3710 3708 display figures inline.
3711 3709 import_all : optional, bool, default: True
3712 3710 Whether to do `from numpy import *` and `from pylab import *`
3713 3711 in addition to module imports.
3714 3712 welcome_message : deprecated
3715 3713 This argument is ignored, no welcome message will be displayed.
3716 3714 """
3717 3715 from IPython.core.pylabtools import import_pylab
3718 3716
3719 3717 gui, backend = self.enable_matplotlib(gui)
3720 3718
3721 3719 # We want to prevent the loading of pylab from polluting the user's
3722 3720 # namespace as shown by the %who* magics, so we execute the activation
3723 3721 # code in an empty namespace, and we update *both* user_ns and
3724 3722 # user_ns_hidden with this information.
3725 3723 ns = {}
3726 3724 import_pylab(ns, import_all)
3727 3725 # warn about clobbered names
3728 3726 ignored = {"__builtins__"}
3729 3727 both = set(ns).intersection(self.user_ns).difference(ignored)
3730 3728 clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
3731 3729 self.user_ns.update(ns)
3732 3730 self.user_ns_hidden.update(ns)
3733 3731 return gui, backend, clobbered
3734 3732
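    # Illustrative sketch (editor's addition): this is normally reached via the
    # %pylab magic; a direct call might look like
    #
    #     gui, backend, clobbered = ip.enable_pylab('inline')
    #     # ``clobbered`` lists user names shadowed by the numpy/pylab import.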
3735 3733 #-------------------------------------------------------------------------
3736 3734 # Utilities
3737 3735 #-------------------------------------------------------------------------
3738 3736
3739 3737 def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
3740 3738 """Expand python variables in a string.
3741 3739
3742 3740 The depth argument indicates how many frames above the caller should
3743 3741 be walked to look for the local namespace in which to expand variables.
3744 3742
3745 3743 The global namespace for expansion is always the user's interactive
3746 3744 namespace.
3747 3745 """
3748 3746 ns = self.user_ns.copy()
3749 3747 try:
3750 3748 frame = sys._getframe(depth+1)
3751 3749 except ValueError:
3752 3750 # This is thrown if there aren't that many frames on the stack,
3753 3751 # e.g. if a script called run_line_magic() directly.
3754 3752 pass
3755 3753 else:
3756 3754 ns.update(frame.f_locals)
3757 3755
3758 3756 try:
3759 3757 # We have to use .vformat() here, because 'self' is a valid and common
3760 3758 # name, and expanding **ns for .format() would make it collide with
3761 3759 # the 'self' argument of the method.
3762 3760 cmd = formatter.vformat(cmd, args=[], kwargs=ns)
3763 3761 except Exception:
3764 3762 # if formatter couldn't format, just let it go untransformed
3765 3763 pass
3766 3764 return cmd
3767 3765
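    # Illustrative sketch (editor's addition): both $name and {expr} forms are
    # expanded from the caller's namespace, e.g. with ``path = '/tmp'`` defined:
    #
    #     ip.var_expand('ls $path')            # -> 'ls /tmp'
    #     ip.var_expand('ls {path + "/x"}')    # -> 'ls /tmp/x'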
3768 3766 def mktempfile(self, data=None, prefix='ipython_edit_'):
3769 3767 """Make a new tempfile and return its filename.
3770 3768
3771 3769 This makes a call to tempfile.mkstemp (in a directory created with
3772 3770 tempfile.mkdtemp), and registers the created filename internally so
3773 3771 IPython cleans it up at exit time.
3774 3772
3775 3773 Optional inputs:
3776 3774
3777 3775 - data(None): if data is given, it gets written out to the temp file
3778 3776 immediately, and the file is closed again."""
3779 3777
3780 3778 dir_path = Path(tempfile.mkdtemp(prefix=prefix))
3781 3779 self.tempdirs.append(dir_path)
3782 3780
3783 3781 handle, filename = tempfile.mkstemp(".py", prefix, dir=str(dir_path))
3784 3782 os.close(handle) # On Windows, there can only be one open handle on a file
3785 3783
3786 3784 file_path = Path(filename)
3787 3785 self.tempfiles.append(file_path)
3788 3786
3789 3787 if data:
3790 3788 file_path.write_text(data, encoding="utf-8")
3791 3789 return filename
3792 3790
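    # Illustrative sketch (editor's addition):
    #
    #     fname = ip.mktempfile("print('hi')\n")   # returns a .py path
    #     # the file (and its temp directory) is removed in atexit_operations()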
3793 3791 def ask_yes_no(self, prompt, default=None, interrupt=None):
3794 3792 if self.quiet:
3795 3793 return True
3796 3794 return ask_yes_no(prompt,default,interrupt)
3797 3795
3798 3796 def show_usage(self):
3799 3797 """Show a usage message"""
3800 3798 page.page(IPython.core.usage.interactive_usage)
3801 3799
3802 3800 def extract_input_lines(self, range_str, raw=False):
3803 3801 """Return as a string a set of input history slices.
3804 3802
3805 3803 Parameters
3806 3804 ----------
3807 3805 range_str : str
3808 3806 The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
3809 3807 since this function is for use by magic functions which get their
3810 3808 arguments as strings. The number before the / is the session
3811 3809 number: ~n goes n back from the current session.
3812 3810
3813 3811 If empty string is given, returns history of current session
3814 3812 without the last input.
3815 3813
3816 3814 raw : bool, optional
3817 3815 By default, the processed input is used. If this is true, the raw
3818 3816 input history is used instead.
3819 3817
3820 3818 Notes
3821 3819 -----
3822 3820 Slices can be described with two notations:
3823 3821
3824 3822 * ``N:M`` -> standard python form, means including items N...(M-1).
3825 3823 * ``N-M`` -> include items N..M (closed endpoint).
3826 3824 """
3827 3825 lines = self.history_manager.get_range_by_str(range_str, raw=raw)
3828 3826 text = "\n".join(x for _, _, x in lines)
3829 3827
3830 3828 # Skip the last line, as it's probably the magic that called this
3831 3829 if not range_str:
3832 3830 if "\n" not in text:
3833 3831 text = ""
3834 3832 else:
3835 3833 text = text[: text.rfind("\n")]
3836 3834
3837 3835 return text
3838 3836
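    # Illustrative sketch (editor's addition): with inputs 1-5 in the history,
    #
    #     ip.extract_input_lines('2-4')    # inputs 2, 3 and 4, newline-joined
    #     ip.extract_input_lines('~1/7')   # input 7 of the previous session
    #     ip.extract_input_lines('')       # current session minus the last line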
3839 3837 def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
3840 3838 """Get a code string from history, file, url, or a string or macro.
3841 3839
3842 3840 This is mainly used by magic functions.
3843 3841
3844 3842 Parameters
3845 3843 ----------
3846 3844 target : str
3847 3845 A string specifying code to retrieve. This will be tried respectively
3848 3846 as: ranges of input history (see %history for syntax), url,
3849 3847 corresponding .py file, filename, or an expression evaluating to a
3850 3848 string or Macro in the user namespace.
3851 3849
3852 3850 If empty string is given, returns complete history of current
3853 3851 session, without the last line.
3854 3852
3855 3853 raw : bool
3856 3854 If true (default), retrieve raw history. Has no effect on the other
3857 3855 retrieval mechanisms.
3858 3856
3859 3857 py_only : bool (default False)
3860 3858 Only try to fetch python code, do not try alternative methods to decode file
3861 3859 if unicode fails.
3862 3860
3863 3861 Returns
3864 3862 -------
3865 3863 A string of code.
3866 3864 ValueError is raised if nothing is found, and TypeError if it evaluates
3867 3865 to an object of another type. In each case, .args[0] is a printable
3868 3866 message.
3869 3867 """
3870 3868 code = self.extract_input_lines(target, raw=raw) # Grab history
3871 3869 if code:
3872 3870 return code
3873 3871 try:
3874 3872 if target.startswith(('http://', 'https://')):
3875 3873 return openpy.read_py_url(target, skip_encoding_cookie=skip_encoding_cookie)
3876 3874 except UnicodeDecodeError as e:
3877 3875 if not py_only :
3878 3876 # Deferred import
3879 3877 from urllib.request import urlopen
3880 3878 response = urlopen(target)
3881 3879 return response.read().decode('latin1')
3882 3880 raise ValueError(("'%s' seems to be unreadable.") % target) from e
3883 3881
3884 3882 potential_target = [target]
3885 3883 try :
3886 3884 potential_target.insert(0,get_py_filename(target))
3887 3885 except IOError:
3888 3886 pass
3889 3887
3890 3888 for tgt in potential_target :
3891 3889 if os.path.isfile(tgt): # Read file
3892 3890 try :
3893 3891 return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
3894 3892 except UnicodeDecodeError as e:
3895 3893 if not py_only :
3896 3894 with io_open(tgt,'r', encoding='latin1') as f :
3897 3895 return f.read()
3898 3896 raise ValueError(("'%s' seems to be unreadable.") % target) from e
3899 3897 elif os.path.isdir(os.path.expanduser(tgt)):
3900 3898 raise ValueError("'%s' is a directory, not a regular file." % target)
3901 3899
3902 3900 if search_ns:
3903 3901 # Inspect namespace to load object source
3904 3902 object_info = self.object_inspect(target, detail_level=1)
3905 3903 if object_info['found'] and object_info['source']:
3906 3904 return object_info['source']
3907 3905
3908 3906 try: # User namespace
3909 3907 codeobj = eval(target, self.user_ns)
3910 3908 except Exception as e:
3911 3909 raise ValueError(("'%s' was not found in history, as a file, url, "
3912 3910 "nor in the user namespace.") % target) from e
3913 3911
3914 3912 if isinstance(codeobj, str):
3915 3913 return codeobj
3916 3914 elif isinstance(codeobj, Macro):
3917 3915 return codeobj.value
3918 3916
3919 3917 raise TypeError("%s is neither a string nor a macro." % target,
3920 3918 codeobj)
3921 3919
3922 3920 def _atexit_once(self):
3923 3921 """
3924 3922 At-exit operations that need to be called at most once.
3925 3923 A second call to this method on the same instance will do nothing.
3926 3924 """
3927 3925
3928 3926 if not getattr(self, "_atexit_once_called", False):
3929 3927 self._atexit_once_called = True
3930 3928 # Clear all user namespaces to release all references cleanly.
3931 3929 self.reset(new_session=False)
3932 3930 # Close the history session (this stores the end time and line count)
3933 3931 # this must be *before* the tempfile cleanup, in case of temporary
3934 3932 # history db
3935 3933 self.history_manager.end_session()
3936 3934 self.history_manager = None
3937 3935
3938 3936 #-------------------------------------------------------------------------
3939 3937 # Things related to IPython exiting
3940 3938 #-------------------------------------------------------------------------
3941 3939 def atexit_operations(self):
3942 3940 """This will be executed at the time of exit.
3943 3941
3944 3942 Cleanup operations and saving of persistent data that is done
3945 3943 unconditionally by IPython should be performed here.
3946 3944
3947 3945 For things that may depend on startup flags or platform specifics (such
3948 3946 as having readline or not), register a separate atexit function in the
3949 3947 code that has the appropriate information, rather than trying to
3950 3948 clutter this method.
3951 3949 """
3952 3950 self._atexit_once()
3953 3951
3954 3952 # Cleanup all tempfiles and folders left around
3955 3953 for tfile in self.tempfiles:
3956 3954 try:
3957 3955 tfile.unlink()
3958 3956 self.tempfiles.remove(tfile)
3959 3957 except FileNotFoundError:
3960 3958 pass
3961 3959 del self.tempfiles
3962 3960 for tdir in self.tempdirs:
3963 3961 try:
3964 3962 shutil.rmtree(tdir)
3965 3963 self.tempdirs.remove(tdir)
3966 3964 except FileNotFoundError:
3967 3965 pass
3968 3966 del self.tempdirs
3969 3967
3970 3968 # Restore user's cursor
3971 3969 if hasattr(self, "editing_mode") and self.editing_mode == "vi":
3972 3970 sys.stdout.write("\x1b[0 q")
3973 3971 sys.stdout.flush()
3974 3972
3975 3973 def cleanup(self):
3976 3974 self.restore_sys_module_state()
3977 3975
3978 3976
3979 3977 # Overridden in terminal subclass to change prompts
3980 3978 def switch_doctest_mode(self, mode):
3981 3979 pass
3982 3980
3983 3981
3984 3982 class InteractiveShellABC(metaclass=abc.ABCMeta):
3985 3983 """An abstract base class for InteractiveShell."""
3986 3984
3987 3985 InteractiveShellABC.register(InteractiveShell)
@@ -1,112 +1,108
1 1 import unittest
2 2 import re
3 3 from IPython.utils.capture import capture_output
4 4 import sys
5 5 import pytest
6 6 from tempfile import TemporaryDirectory
7 7 from IPython.testing import tools as tt
8 8
9 9
10 10 def _exceptiongroup_common(
11 11 outer_chain: str,
12 12 inner_chain: str,
13 13 native: bool,
14 14 ) -> None:
15 15 pre_raise = "exceptiongroup." if not native else ""
16 pre_catch = pre_raise if sys.version_info < (3, 11) else ""
17 16 filestr = f"""
18 17 {"import exceptiongroup" if not native else ""}
19 18 import pytest
20 19
21 20 def f(): raise ValueError("From f()")
22 21 def g(): raise BaseException("From g()")
23 22
24 23 def inner(inner_chain):
25 24 excs = []
26 25 for callback in [f, g]:
27 26 try:
28 27 callback()
29 28 except BaseException as err:
30 29 excs.append(err)
31 30 if excs:
32 31 if inner_chain == "none":
33 32 raise {pre_raise}BaseExceptionGroup("Oops", excs)
34 33 try:
35 34 raise SyntaxError()
36 35 except SyntaxError as e:
37 36 if inner_chain == "from":
38 37 raise {pre_raise}BaseExceptionGroup("Oops", excs) from e
39 38 else:
40 39 raise {pre_raise}BaseExceptionGroup("Oops", excs)
41 40
42 41 def outer(outer_chain, inner_chain):
43 42 try:
44 43 inner(inner_chain)
45 except {pre_catch}BaseExceptionGroup as e:
44 except BaseExceptionGroup as e:
46 45 if outer_chain == "none":
47 46 raise
48 47 if outer_chain == "from":
49 48 raise IndexError() from e
50 49 else:
51 50 raise IndexError
52 51
53 52
54 53 outer("{outer_chain}", "{inner_chain}")
55 54 """
56 55 with capture_output() as cap:
57 56 ip.run_cell(filestr)
58 57
59 58 match_lines = []
60 59 if inner_chain == "another":
61 60 match_lines += [
62 61 "During handling of the above exception, another exception occurred:",
63 62 ]
64 63 elif inner_chain == "from":
65 64 match_lines += [
66 65 "The above exception was the direct cause of the following exception:",
67 66 ]
68 67
69 68 match_lines += [
70 69 " + Exception Group Traceback (most recent call last):",
71 f" | {pre_catch}BaseExceptionGroup: Oops (2 sub-exceptions)",
70 " | BaseExceptionGroup: Oops (2 sub-exceptions)",
72 71 " | ValueError: From f()",
73 72 " | BaseException: From g()",
74 73 ]
75 74
76 75 if outer_chain == "another":
77 76 match_lines += [
78 77 "During handling of the above exception, another exception occurred:",
79 78 "IndexError",
80 79 ]
81 80 elif outer_chain == "from":
82 81 match_lines += [
83 82 "The above exception was the direct cause of the following exception:",
84 83 "IndexError",
85 84 ]
86 85
87 86 error_lines = cap.stderr.split("\n")
88 87
89 88 err_index = match_index = 0
90 89 for expected in match_lines:
91 90 for i, actual in enumerate(error_lines):
92 91 if actual == expected:
93 92 error_lines = error_lines[i + 1 :]
94 93 break
95 94 else:
96 95 assert False, f"{expected} not found in cap.stderr"
97 96
98 97
99 @pytest.mark.skipif(
100 sys.version_info < (3, 11), reason="Native ExceptionGroup not implemented"
101 )
102 98 @pytest.mark.parametrize("outer_chain", ["none", "from", "another"])
103 99 @pytest.mark.parametrize("inner_chain", ["none", "from", "another"])
104 100 def test_native_exceptiongroup(outer_chain, inner_chain) -> None:
105 101 _exceptiongroup_common(outer_chain, inner_chain, native=True)
106 102
107 103
108 104 @pytest.mark.parametrize("outer_chain", ["none", "from", "another"])
109 105 @pytest.mark.parametrize("inner_chain", ["none", "from", "another"])
110 106 def test_exceptiongroup_backport(outer_chain, inner_chain) -> None:
111 107 pytest.importorskip("exceptiongroup")
112 108 _exceptiongroup_common(outer_chain, inner_chain, native=False)
@@ -1,785 +1,782
1 1 import sys
2 2 from contextlib import contextmanager
3 3 from typing import (
4 4 Annotated,
5 5 AnyStr,
6 6 NamedTuple,
7 7 Literal,
8 8 NewType,
9 9 Optional,
10 10 Protocol,
11 11 TypeGuard,
12 12 Union,
13 13 TypedDict,
14 14 )
15 15 from functools import partial
16 16 from IPython.core.guarded_eval import (
17 17 EvaluationContext,
18 18 GuardRejection,
19 19 guarded_eval,
20 20 _unbind_method,
21 21 )
22 22 from IPython.testing import decorators as dec
23 23 import pytest
24 24
25 25
26 if sys.version_info < (3, 11):
27 from typing_extensions import Self, LiteralString
28 else:
29 from typing import Self, LiteralString
26 from typing import Self, LiteralString
30 27
31 28 if sys.version_info < (3, 12):
32 29 from typing_extensions import TypeAliasType
33 30 else:
34 31 from typing import TypeAliasType
35 32
36 33
37 34 def create_context(evaluation: str, **kwargs):
38 35 return EvaluationContext(locals=kwargs, globals={}, evaluation=evaluation)
39 36
40 37
41 38 forbidden = partial(create_context, "forbidden")
42 39 minimal = partial(create_context, "minimal")
43 40 limited = partial(create_context, "limited")
44 41 unsafe = partial(create_context, "unsafe")
45 42 dangerous = partial(create_context, "dangerous")
46 43
47 44 LIMITED_OR_HIGHER = [limited, unsafe, dangerous]
48 45 MINIMAL_OR_HIGHER = [minimal, *LIMITED_OR_HIGHER]
49 46
50 47
51 48 @contextmanager
52 49 def module_not_installed(module: str):
53 50 import sys
54 51
55 52 try:
56 53 to_restore = sys.modules[module]
57 54 del sys.modules[module]
58 55 except KeyError:
59 56 to_restore = None
60 57 try:
61 58 yield
62 59 finally:
63 60 sys.modules[module] = to_restore
64 61
65 62
66 63 def test_external_not_installed():
67 64 """
68 65 Because the attribute check requires checking whether the object is not of an
69 66 allowed external type, this tests the logic when the external module is absent.
70 67 """
71 68
72 69 class Custom:
73 70 def __init__(self):
74 71 self.test = 1
75 72
76 73 def __getattr__(self, key):
77 74 return key
78 75
79 76 with module_not_installed("pandas"):
80 77 context = limited(x=Custom())
81 78 with pytest.raises(GuardRejection):
82 79 guarded_eval("x.test", context)
83 80
84 81
85 82 @dec.skip_without("pandas")
86 83 def test_external_changed_api(monkeypatch):
87 84 """Check that execution is rejected if the external API paths have changed"""
88 85 import pandas as pd
89 86
90 87 series = pd.Series([1], index=["a"])
91 88
92 89 with monkeypatch.context() as m:
93 90 m.delattr(pd, "Series")
94 91 context = limited(data=series)
95 92 with pytest.raises(GuardRejection):
96 93 guarded_eval("data.iloc[0]", context)
97 94
98 95
99 96 @dec.skip_without("pandas")
100 97 def test_pandas_series_iloc():
101 98 import pandas as pd
102 99
103 100 series = pd.Series([1], index=["a"])
104 101 context = limited(data=series)
105 102 assert guarded_eval("data.iloc[0]", context) == 1
106 103
107 104
108 105 def test_rejects_custom_properties():
109 106 class BadProperty:
110 107 @property
111 108 def iloc(self):
112 109 return [None]
113 110
114 111 series = BadProperty()
115 112 context = limited(data=series)
116 113
117 114 with pytest.raises(GuardRejection):
118 115 guarded_eval("data.iloc[0]", context)
119 116
120 117
121 118 @dec.skip_without("pandas")
122 119 def test_accepts_non_overriden_properties():
123 120 import pandas as pd
124 121
125 122 class GoodProperty(pd.Series):
126 123 pass
127 124
128 125 series = GoodProperty([1], index=["a"])
129 126 context = limited(data=series)
130 127
131 128 assert guarded_eval("data.iloc[0]", context) == 1
132 129
133 130
134 131 @dec.skip_without("pandas")
135 132 def test_pandas_series():
136 133 import pandas as pd
137 134
138 135 context = limited(data=pd.Series([1], index=["a"]))
139 136 assert guarded_eval('data["a"]', context) == 1
140 137 with pytest.raises(KeyError):
141 138 guarded_eval('data["c"]', context)
142 139
143 140
144 141 @dec.skip_without("pandas")
145 142 def test_pandas_bad_series():
146 143 import pandas as pd
147 144
148 145 class BadItemSeries(pd.Series):
149 146 def __getitem__(self, key):
150 147 return "CUSTOM_ITEM"
151 148
152 149 class BadAttrSeries(pd.Series):
153 150 def __getattr__(self, key):
154 151 return "CUSTOM_ATTR"
155 152
156 153 bad_series = BadItemSeries([1], index=["a"])
157 154 context = limited(data=bad_series)
158 155
159 156 with pytest.raises(GuardRejection):
160 157 guarded_eval('data["a"]', context)
161 158 with pytest.raises(GuardRejection):
162 159 guarded_eval('data["c"]', context)
163 160
164 161 # note: here result is a bit unexpected because
165 162 # pandas `__getattr__` calls `__getitem__`;
166 163 # FIXME - special case to handle it?
167 164 assert guarded_eval("data.a", context) == "CUSTOM_ITEM"
168 165
169 166 context = unsafe(data=bad_series)
170 167 assert guarded_eval('data["a"]', context) == "CUSTOM_ITEM"
171 168
172 169 bad_attr_series = BadAttrSeries([1], index=["a"])
173 170 context = limited(data=bad_attr_series)
174 171 assert guarded_eval('data["a"]', context) == 1
175 172 with pytest.raises(GuardRejection):
176 173 guarded_eval("data.a", context)
177 174
178 175
179 176 @dec.skip_without("pandas")
180 177 def test_pandas_dataframe_loc():
181 178 import pandas as pd
182 179 from pandas.testing import assert_series_equal
183 180
184 181 data = pd.DataFrame([{"a": 1}])
185 182 context = limited(data=data)
186 183 assert_series_equal(guarded_eval('data.loc[:, "a"]', context), data["a"])
187 184
188 185
189 186 def test_named_tuple():
190 187 class GoodNamedTuple(NamedTuple):
191 188 a: str
192 189 pass
193 190
194 191 class BadNamedTuple(NamedTuple):
195 192 a: str
196 193
197 194 def __getitem__(self, key):
198 195 return None
199 196
200 197 good = GoodNamedTuple(a="x")
201 198 bad = BadNamedTuple(a="x")
202 199
203 200 context = limited(data=good)
204 201 assert guarded_eval("data[0]", context) == "x"
205 202
206 203 context = limited(data=bad)
207 204 with pytest.raises(GuardRejection):
208 205 guarded_eval("data[0]", context)
209 206
210 207
211 208 def test_dict():
212 209 context = limited(data={"a": 1, "b": {"x": 2}, ("x", "y"): 3})
213 210 assert guarded_eval('data["a"]', context) == 1
214 211 assert guarded_eval('data["b"]', context) == {"x": 2}
215 212 assert guarded_eval('data["b"]["x"]', context) == 2
216 213 assert guarded_eval('data["x", "y"]', context) == 3
217 214
218 215 assert guarded_eval("data.keys", context)
219 216
220 217
221 218 def test_set():
222 219 context = limited(data={"a", "b"})
223 220 assert guarded_eval("data.difference", context)
224 221
225 222
226 223 def test_list():
227 224 context = limited(data=[1, 2, 3])
228 225 assert guarded_eval("data[1]", context) == 2
229 226 assert guarded_eval("data.copy", context)
230 227
231 228
232 229 def test_dict_literal():
233 230 context = limited()
234 231 assert guarded_eval("{}", context) == {}
235 232 assert guarded_eval('{"a": 1}', context) == {"a": 1}
236 233
237 234
238 235 def test_list_literal():
239 236 context = limited()
240 237 assert guarded_eval("[]", context) == []
241 238 assert guarded_eval('[1, "a"]', context) == [1, "a"]
242 239
243 240
244 241 def test_set_literal():
245 242 context = limited()
246 243 assert guarded_eval("set()", context) == set()
247 244 assert guarded_eval('{"a"}', context) == {"a"}
248 245
249 246
250 247 def test_evaluates_if_expression():
251 248 context = limited()
252 249 assert guarded_eval("2 if True else 3", context) == 2
253 250 assert guarded_eval("4 if False else 5", context) == 5
254 251
255 252
256 253 def test_object():
257 254 obj = object()
258 255 context = limited(obj=obj)
259 256 assert guarded_eval("obj.__dir__", context) == obj.__dir__
260 257
261 258
262 259 @pytest.mark.parametrize(
263 260 "code,expected",
264 261 [
265 262 ["int.numerator", int.numerator],
266 263 ["float.is_integer", float.is_integer],
267 264 ["complex.real", complex.real],
268 265 ],
269 266 )
270 267 def test_number_attributes(code, expected):
271 268 assert guarded_eval(code, limited()) == expected
272 269
273 270
274 271 def test_method_descriptor():
275 272 context = limited()
276 273 assert guarded_eval("list.copy.__name__", context) == "copy"
277 274
278 275
279 276 class HeapType:
280 277 pass
281 278
282 279
283 280 class CallCreatesHeapType:
284 281 def __call__(self) -> HeapType:
285 282 return HeapType()
286 283
287 284
288 285 class CallCreatesBuiltin:
289 286 def __call__(self) -> frozenset:
290 287 return frozenset()
291 288
292 289
293 290 class HasStaticMethod:
294 291 @staticmethod
295 292 def static_method() -> HeapType:
296 293 return HeapType()
297 294
298 295
299 296 class InitReturnsFrozenset:
300 297 def __new__(self) -> frozenset: # type:ignore[misc]
301 298 return frozenset()
302 299
303 300
304 301 class StringAnnotation:
305 302 def heap(self) -> "HeapType":
306 303 return HeapType()
307 304
308 305 def copy(self) -> "StringAnnotation":
309 306 return StringAnnotation()
310 307
311 308
312 309 CustomIntType = NewType("CustomIntType", int)
313 310 CustomHeapType = NewType("CustomHeapType", HeapType)
314 311 IntTypeAlias = TypeAliasType("IntTypeAlias", int)
315 312 HeapTypeAlias = TypeAliasType("HeapTypeAlias", HeapType)
316 313
317 314
318 315 class TestProtocol(Protocol):
319 316 def test_method(self) -> bool:
320 317 pass
321 318
322 319
323 320 class TestProtocolImplementer(TestProtocol):
324 321 def test_method(self) -> bool:
325 322 return True
326 323
327 324
328 325 class Movie(TypedDict):
329 326 name: str
330 327 year: int
331 328
332 329
333 330 class SpecialTyping:
334 331 def custom_int_type(self) -> CustomIntType:
335 332 return CustomIntType(1)
336 333
337 334 def custom_heap_type(self) -> CustomHeapType:
338 335 return CustomHeapType(HeapType())
339 336
340 337 # TODO: remove type:ignore comment once mypy
341 338 # supports explicit calls to `TypeAliasType`, see:
342 339 # https://github.com/python/mypy/issues/16614
343 340 def int_type_alias(self) -> IntTypeAlias: # type:ignore[valid-type]
344 341 return 1
345 342
346 343 def heap_type_alias(self) -> HeapTypeAlias: # type:ignore[valid-type]
347 344 return 1
348 345
349 346 def literal(self) -> Literal[False]:
350 347 return False
351 348
352 349 def literal_string(self) -> LiteralString:
353 350 return "test"
354 351
355 352 def self(self) -> Self:
356 353 return self
357 354
358 355 def any_str(self, x: AnyStr) -> AnyStr:
359 356 return x
360 357
361 358 def annotated(self) -> Annotated[float, "positive number"]:
362 359 return 1
363 360
364 361 def annotated_self(self) -> Annotated[Self, "self with metadata"]:
365 362 self._metadata = "test"
366 363 return self
367 364
368 365 def int_type_guard(self, x) -> TypeGuard[int]:
369 366 return isinstance(x, int)
370 367
371 368 def optional_float(self) -> Optional[float]:
372 369 return 1.0
373 370
374 371 def union_str_and_int(self) -> Union[str, int]:
375 372 return ""
376 373
377 374 def protocol(self) -> TestProtocol:
378 375 return TestProtocolImplementer()
379 376
380 377 def typed_dict(self) -> Movie:
381 378 return {"name": "The Matrix", "year": 1999}
382 379
383 380
384 381 @pytest.mark.parametrize(
385 382 "data,code,expected,equality",
386 383 [
387 384 [[1, 2, 3], "data.index(2)", 1, True],
388 385 [{"a": 1}, "data.keys().isdisjoint({})", True, True],
389 386 [StringAnnotation(), "data.heap()", HeapType, False],
390 387 [StringAnnotation(), "data.copy()", StringAnnotation, False],
391 388 # test cases for `__call__`
392 389 [CallCreatesHeapType(), "data()", HeapType, False],
393 390 [CallCreatesBuiltin(), "data()", frozenset, False],
394 391 # Test cases for `__init__`
395 392 [HeapType, "data()", HeapType, False],
396 393 [InitReturnsFrozenset, "data()", frozenset, False],
397 394 [HeapType(), "data.__class__()", HeapType, False],
398 395 # supported special cases for typing
399 396 [SpecialTyping(), "data.custom_int_type()", int, False],
400 397 [SpecialTyping(), "data.custom_heap_type()", HeapType, False],
401 398 [SpecialTyping(), "data.int_type_alias()", int, False],
402 399 [SpecialTyping(), "data.heap_type_alias()", HeapType, False],
403 400 [SpecialTyping(), "data.self()", SpecialTyping, False],
404 401 [SpecialTyping(), "data.literal()", False, True],
405 402 [SpecialTyping(), "data.literal_string()", str, False],
406 403 [SpecialTyping(), "data.any_str('a')", str, False],
407 404 [SpecialTyping(), "data.any_str(b'a')", bytes, False],
408 405 [SpecialTyping(), "data.annotated()", float, False],
409 406 [SpecialTyping(), "data.annotated_self()", SpecialTyping, False],
410 407 [SpecialTyping(), "data.int_type_guard()", int, False],
411 408 # test cases for static methods
412 409 [HasStaticMethod, "data.static_method()", HeapType, False],
413 410 ],
414 411 )
415 412 def test_evaluates_calls(data, code, expected, equality):
416 413 context = limited(data=data, HeapType=HeapType, StringAnnotation=StringAnnotation)
417 414 value = guarded_eval(code, context)
418 415 if equality:
419 416 assert value == expected
420 417 else:
421 418 assert isinstance(value, expected)
422 419
423 420
424 421 @pytest.mark.parametrize(
425 422 "data,code,expected_attributes",
426 423 [
427 424 [SpecialTyping(), "data.optional_float()", ["is_integer"]],
428 425 [
429 426 SpecialTyping(),
430 427 "data.union_str_and_int()",
431 428 ["capitalize", "as_integer_ratio"],
432 429 ],
433 430 [SpecialTyping(), "data.protocol()", ["test_method"]],
434 431 [SpecialTyping(), "data.typed_dict()", ["keys", "values", "items"]],
435 432 ],
436 433 )
437 434 def test_mocks_attributes_of_call_results(data, code, expected_attributes):
438 435 context = limited(data=data, HeapType=HeapType, StringAnnotation=StringAnnotation)
439 436 result = guarded_eval(code, context)
440 437 for attr in expected_attributes:
441 438 assert hasattr(result, attr)
442 439 assert attr in dir(result)
443 440
444 441
445 442 @pytest.mark.parametrize(
446 443 "data,code,expected_items",
447 444 [
448 445 [SpecialTyping(), "data.typed_dict()", {"year": int, "name": str}],
449 446 ],
450 447 )
451 448 def test_mocks_items_of_call_results(data, code, expected_items):
452 449 context = limited(data=data, HeapType=HeapType, StringAnnotation=StringAnnotation)
453 450 result = guarded_eval(code, context)
454 451 ipython_keys = result._ipython_key_completions_()
455 452 for key, value in expected_items.items():
456 453 assert isinstance(result[key], value)
457 454 assert key in ipython_keys
458 455
459 456
460 457 @pytest.mark.parametrize(
461 458 "data,bad",
462 459 [
463 460 [[1, 2, 3], "data.append(4)"],
464 461 [{"a": 1}, "data.update()"],
465 462 ],
466 463 )
467 464 def test_rejects_calls_with_side_effects(data, bad):
468 465 context = limited(data=data)
469 466
470 467 with pytest.raises(GuardRejection):
471 468 guarded_eval(bad, context)
472 469
473 470
474 471 @pytest.mark.parametrize(
475 472 "code,expected",
476 473 [
477 474 ["(1\n+\n1)", 2],
478 475 ["list(range(10))[-1:]", [9]],
479 476 ["list(range(20))[3:-2:3]", [3, 6, 9, 12, 15]],
480 477 ],
481 478 )
482 479 @pytest.mark.parametrize("context", LIMITED_OR_HIGHER)
483 480 def test_evaluates_complex_cases(code, expected, context):
484 481 assert guarded_eval(code, context()) == expected
485 482
486 483
487 484 @pytest.mark.parametrize(
488 485 "code,expected",
489 486 [
490 487 ["1", 1],
491 488 ["1.0", 1.0],
492 489 ["0xdeedbeef", 0xDEEDBEEF],
493 490 ["True", True],
494 491 ["None", None],
495 492 ["{}", {}],
496 493 ["[]", []],
497 494 ],
498 495 )
499 496 @pytest.mark.parametrize("context", MINIMAL_OR_HIGHER)
500 497 def test_evaluates_literals(code, expected, context):
501 498 assert guarded_eval(code, context()) == expected
502 499
503 500
504 501 @pytest.mark.parametrize(
505 502 "code,expected",
506 503 [
507 504 ["-5", -5],
508 505 ["+5", +5],
509 506 ["~5", -6],
510 507 ],
511 508 )
512 509 @pytest.mark.parametrize("context", LIMITED_OR_HIGHER)
513 510 def test_evaluates_unary_operations(code, expected, context):
514 511 assert guarded_eval(code, context()) == expected
515 512
516 513
517 514 @pytest.mark.parametrize(
518 515 "code,expected",
519 516 [
520 517 ["1 + 1", 2],
521 518 ["3 - 1", 2],
522 519 ["2 * 3", 6],
523 520 ["5 // 2", 2],
524 521 ["5 / 2", 2.5],
525 522 ["5**2", 25],
526 523 ["2 >> 1", 1],
527 524 ["2 << 1", 4],
528 525 ["1 | 2", 3],
529 526 ["1 & 1", 1],
530 527 ["1 & 2", 0],
531 528 ],
532 529 )
533 530 @pytest.mark.parametrize("context", LIMITED_OR_HIGHER)
534 531 def test_evaluates_binary_operations(code, expected, context):
535 532 assert guarded_eval(code, context()) == expected
536 533
537 534
538 535 @pytest.mark.parametrize(
539 536 "code,expected",
540 537 [
541 538 ["2 > 1", True],
542 539 ["2 < 1", False],
543 540 ["2 <= 1", False],
544 541 ["2 <= 2", True],
545 542 ["1 >= 2", False],
546 543 ["2 >= 2", True],
547 544 ["2 == 2", True],
548 545 ["1 == 2", False],
549 546 ["1 != 2", True],
550 547 ["1 != 1", False],
551 548 ["1 < 4 < 3", False],
552 549 ["(1 < 4) < 3", True],
553 550 ["4 > 3 > 2 > 1", True],
554 551 ["4 > 3 > 2 > 9", False],
555 552 ["1 < 2 < 3 < 4", True],
556 553 ["9 < 2 < 3 < 4", False],
557 554 ["1 < 2 > 1 > 0 > -1 < 1", True],
558 555 ["1 in [1] in [[1]]", True],
559 556 ["1 in [1] in [[2]]", False],
560 557 ["1 in [1]", True],
561 558 ["0 in [1]", False],
562 559 ["1 not in [1]", False],
563 560 ["0 not in [1]", True],
564 561 ["True is True", True],
565 562 ["False is False", True],
566 563 ["True is False", False],
567 564 ["True is not True", False],
568 565 ["False is not True", True],
569 566 ],
570 567 )
571 568 @pytest.mark.parametrize("context", LIMITED_OR_HIGHER)
572 569 def test_evaluates_comparisons(code, expected, context):
573 570 assert guarded_eval(code, context()) == expected
574 571
575 572
576 573 def test_guards_comparisons():
577 574 class GoodEq(int):
578 575 pass
579 576
580 577 class BadEq(int):
581 578 def __eq__(self, other):
582 579 assert False
583 580
584 581 context = limited(bad=BadEq(1), good=GoodEq(1))
585 582
586 583 with pytest.raises(GuardRejection):
587 584 guarded_eval("bad == 1", context)
588 585
589 586 with pytest.raises(GuardRejection):
590 587 guarded_eval("bad != 1", context)
591 588
592 589 with pytest.raises(GuardRejection):
593 590 guarded_eval("1 == bad", context)
594 591
595 592 with pytest.raises(GuardRejection):
596 593 guarded_eval("1 != bad", context)
597 594
598 595 assert guarded_eval("good == 1", context) is True
599 596 assert guarded_eval("good != 1", context) is False
600 597 assert guarded_eval("1 == good", context) is True
601 598 assert guarded_eval("1 != good", context) is False
602 599
603 600
604 601 def test_guards_unary_operations():
605 602 class GoodOp(int):
606 603 pass
607 604
608 605 class BadOpInv(int):
609 606 def __inv__(self, other):
610 607 assert False
611 608
612 609 class BadOpInverse(int):
613 610 def __invert__(self, other):
614 611 assert False
615 612
616 613 context = limited(good=GoodOp(1), bad1=BadOpInv(1), bad2=BadOpInverse(1))
617 614
618 615 with pytest.raises(GuardRejection):
619 616 guarded_eval("~bad1", context)
620 617
621 618 with pytest.raises(GuardRejection):
622 619 guarded_eval("~bad2", context)
623 620
624 621
625 622 def test_guards_binary_operations():
626 623 class GoodOp(int):
627 624 pass
628 625
629 626 class BadOp(int):
630 627 def __add__(self, other):
631 628 assert False
632 629
633 630 context = limited(good=GoodOp(1), bad=BadOp(1))
634 631
635 632 with pytest.raises(GuardRejection):
636 633 guarded_eval("1 + bad", context)
637 634
638 635 with pytest.raises(GuardRejection):
639 636 guarded_eval("bad + 1", context)
640 637
641 638 assert guarded_eval("good + 1", context) == 2
642 639 assert guarded_eval("1 + good", context) == 2
643 640
644 641
645 642 def test_guards_attributes():
646 643 class GoodAttr(float):
647 644 pass
648 645
649 646 class BadAttr1(float):
650 647 def __getattr__(self, key):
651 648 assert False
652 649
653 650 class BadAttr2(float):
654 651 def __getattribute__(self, key):
655 652 assert False
656 653
657 654 context = limited(good=GoodAttr(0.5), bad1=BadAttr1(0.5), bad2=BadAttr2(0.5))
658 655
659 656 with pytest.raises(GuardRejection):
660 657 guarded_eval("bad1.as_integer_ratio", context)
661 658
662 659 with pytest.raises(GuardRejection):
663 660 guarded_eval("bad2.as_integer_ratio", context)
664 661
665 662 assert guarded_eval("good.as_integer_ratio()", context) == (1, 2)
666 663
667 664
668 665 @pytest.mark.parametrize("context", MINIMAL_OR_HIGHER)
669 666 def test_access_builtins(context):
670 667 assert guarded_eval("round", context()) == round
671 668
672 669
673 670 def test_access_builtins_fails():
674 671 context = limited()
675 672 with pytest.raises(NameError):
676 673 guarded_eval("this_is_not_builtin", context)
677 674
678 675
679 676 def test_rejects_forbidden():
680 677 context = forbidden()
681 678 with pytest.raises(GuardRejection):
682 679 guarded_eval("1", context)
683 680
684 681
685 682 def test_guards_locals_and_globals():
686 683 context = EvaluationContext(
687 684 locals={"local_a": "a"}, globals={"global_b": "b"}, evaluation="minimal"
688 685 )
689 686
690 687 with pytest.raises(GuardRejection):
691 688 guarded_eval("local_a", context)
692 689
693 690 with pytest.raises(GuardRejection):
694 691 guarded_eval("global_b", context)
695 692
696 693
697 694 def test_access_locals_and_globals():
698 695 context = EvaluationContext(
699 696 locals={"local_a": "a"}, globals={"global_b": "b"}, evaluation="limited"
700 697 )
701 698 assert guarded_eval("local_a", context) == "a"
702 699 assert guarded_eval("global_b", context) == "b"
703 700
704 701
705 702 @pytest.mark.parametrize(
706 703 "code",
707 704 ["def func(): pass", "class C: pass", "x = 1", "x += 1", "del x", "import ast"],
708 705 )
709 706 @pytest.mark.parametrize("context", [minimal(), limited(), unsafe()])
710 707 def test_rejects_side_effect_syntax(code, context):
711 708 with pytest.raises(SyntaxError):
712 709 guarded_eval(code, context)
713 710
714 711
715 712 def test_subscript():
716 713 context = EvaluationContext(
717 714 locals={}, globals={}, evaluation="limited", in_subscript=True
718 715 )
719 716 empty_slice = slice(None, None, None)
720 717 assert guarded_eval("", context) == tuple()
721 718 assert guarded_eval(":", context) == empty_slice
722 719 assert guarded_eval("1:2:3", context) == slice(1, 2, 3)
723 720 assert guarded_eval(':, "a"', context) == (empty_slice, "a")
724 721
725 722
726 723 def test_unbind_method():
727 724 class X(list):
728 725 def index(self, k):
729 726 return "CUSTOM"
730 727
731 728 x = X()
732 729 assert _unbind_method(x.index) is X.index
733 730 assert _unbind_method([].index) is list.index
734 731 assert _unbind_method(list.index) is None
735 732
736 733
737 734 def test_assumption_instance_attr_do_not_matter():
738 735 """This is semi-specified in Python documentation.
739 736
740 737 However, since the specification says 'not guaranteed
741 738 to work' rather than 'is forbidden to work', future
742 739 versions could invalidate this assumption. This test
743 740 is meant to catch such a change if it ever happens.
744 741 """
745 742
746 743 class T:
747 744 def __getitem__(self, k):
748 745 return "a"
749 746
750 747 def __getattr__(self, k):
751 748 return "a"
752 749
753 750 def f(self):
754 751 return "b"
755 752
756 753 t = T()
757 754 t.__getitem__ = f
758 755 t.__getattr__ = f
759 756 assert t[1] == "a"
760 757 assert t.a == "a"
761 758
762 759
763 760 def test_assumption_named_tuples_share_getitem():
764 761 """Check assumption on named tuples sharing __getitem__"""
765 762 from typing import NamedTuple
766 763
767 764 class A(NamedTuple):
768 765 pass
769 766
770 767 class B(NamedTuple):
771 768 pass
772 769
773 770 assert A.__getitem__ == B.__getitem__
774 771
775 772
776 773 @dec.skip_without("numpy")
777 774 def test_module_access():
778 775 import numpy
779 776
780 777 context = limited(numpy=numpy)
781 778 assert guarded_eval("numpy.linalg.norm", context) == numpy.linalg.norm
782 779
783 780 context = minimal(numpy=numpy)
784 781 with pytest.raises(GuardRejection):
785 782 guarded_eval("np.linalg.norm", context)
@@ -1,643 +1,637
1 1 # -*- coding: utf-8 -*-
2 2 """Tests for the inputsplitter module."""
3 3
4 4
5 5 # Copyright (c) IPython Development Team.
6 6 # Distributed under the terms of the Modified BSD License.
7 7
8 8 import unittest
9 9 import pytest
10 10 import sys
11 11
12 12 with pytest.warns(DeprecationWarning, match="inputsplitter"):
13 13 from IPython.core import inputsplitter as isp
14 14 from IPython.core.inputtransformer import InputTransformer
15 15 from IPython.core.tests.test_inputtransformer import syntax, syntax_ml
16 16 from IPython.testing import tools as tt
17 17
18 18 #-----------------------------------------------------------------------------
19 19 # Semi-complete examples (also used as tests)
20 20 #-----------------------------------------------------------------------------
21 21
22 22 # Note: at the bottom, there's a slightly more complete version of this that
23 23 # can be useful during development of code here.
24 24
25 25 def mini_interactive_loop(input_func):
26 26 """Minimal example of the logic of an interactive interpreter loop.
27 27
28 28 This serves as an example, and it is used by the test system with a fake
29 29 raw_input that simulates interactive input."""
30 30
31 31 from IPython.core.inputsplitter import InputSplitter
32 32
33 33 isp = InputSplitter()
34 34 # In practice, this input loop would be wrapped in an outside loop to read
35 35 # input indefinitely, until some exit/quit command was issued. Here we
36 36 # only illustrate the basic inner loop.
37 37 while isp.push_accepts_more():
38 38 indent = ' '*isp.get_indent_spaces()
39 39 prompt = '>>> ' + indent
40 40 line = indent + input_func(prompt)
41 41 isp.push(line)
42 42
43 43 # Here we just return input so we can use it in a test suite, but a real
44 44 # interpreter would instead send it for execution somewhere.
45 45 src = isp.source_reset()
46 46 # print('Input source was:\n', src) # dbg
47 47 return src
48 48
49 49 #-----------------------------------------------------------------------------
50 50 # Test utilities, just for local use
51 51 #-----------------------------------------------------------------------------
52 52
53 53
54 54 def pseudo_input(lines):
55 55 """Return a function that acts like raw_input but feeds the input list."""
56 56 ilines = iter(lines)
57 57 def raw_in(prompt):
58 58 try:
59 59 return next(ilines)
60 60 except StopIteration:
61 61 return ''
62 62 return raw_in
63 63
64 64 #-----------------------------------------------------------------------------
65 65 # Tests
66 66 #-----------------------------------------------------------------------------
67 67 def test_spaces():
68 68 tests = [('', 0),
69 69 (' ', 1),
70 70 ('\n', 0),
71 71 (' \n', 1),
72 72 ('x', 0),
73 73 (' x', 1),
74 74 (' x',2),
75 75 (' x',4),
76 76 # Note: tabs are counted as a single whitespace!
77 77 ('\tx', 1),
78 78 ('\t x', 2),
79 79 ]
80 80 with pytest.warns(PendingDeprecationWarning):
81 81 tt.check_pairs(isp.num_ini_spaces, tests)
82 82
83 83
84 84 def test_remove_comments():
85 85 tests = [('text', 'text'),
86 86 ('text # comment', 'text '),
87 87 ('text # comment\n', 'text \n'),
88 88 ('text # comment \n', 'text \n'),
89 89 ('line # c \nline\n','line \nline\n'),
90 90 ('line # c \nline#c2 \nline\nline #c\n\n',
91 91 'line \nline\nline\nline \n\n'),
92 92 ]
93 93 tt.check_pairs(isp.remove_comments, tests)
94 94
95 95
96 96 def test_get_input_encoding():
97 97 encoding = isp.get_input_encoding()
98 98 assert isinstance(encoding, str)
99 99 # simple-minded check that at least encoding a simple string works with the
100 100 # encoding we got.
101 101 assert "test".encode(encoding) == b"test"
102 102
103 103
104 104 class NoInputEncodingTestCase(unittest.TestCase):
105 105 def setUp(self):
106 106 self.old_stdin = sys.stdin
107 107 class X: pass
108 108 fake_stdin = X()
109 109 sys.stdin = fake_stdin
110 110
111 111 def test(self):
112 112 # Verify that if sys.stdin has no 'encoding' attribute we do the right
113 113 # thing
114 114 enc = isp.get_input_encoding()
115 115 self.assertEqual(enc, 'ascii')
116 116
117 117 def tearDown(self):
118 118 sys.stdin = self.old_stdin
119 119
120 120
121 121 class InputSplitterTestCase(unittest.TestCase):
122 122 def setUp(self):
123 123 self.isp = isp.InputSplitter()
124 124
125 125 def test_reset(self):
126 126 isp = self.isp
127 127 isp.push('x=1')
128 128 isp.reset()
129 129 self.assertEqual(isp._buffer, [])
130 130 self.assertEqual(isp.get_indent_spaces(), 0)
131 131 self.assertEqual(isp.source, '')
132 132 self.assertEqual(isp.code, None)
133 133 self.assertEqual(isp._is_complete, False)
134 134
135 135 def test_source(self):
136 136 self.isp._store('1')
137 137 self.isp._store('2')
138 138 self.assertEqual(self.isp.source, '1\n2\n')
139 139 self.assertEqual(len(self.isp._buffer)>0, True)
140 140 self.assertEqual(self.isp.source_reset(), '1\n2\n')
141 141 self.assertEqual(self.isp._buffer, [])
142 142 self.assertEqual(self.isp.source, '')
143 143
144 144 def test_indent(self):
145 145 isp = self.isp # shorthand
146 146 isp.push('x=1')
147 147 self.assertEqual(isp.get_indent_spaces(), 0)
148 148 isp.push('if 1:\n x=1')
149 149 self.assertEqual(isp.get_indent_spaces(), 4)
150 150 isp.push('y=2\n')
151 151 self.assertEqual(isp.get_indent_spaces(), 0)
152 152
153 153 def test_indent2(self):
154 154 isp = self.isp
155 155 isp.push('if 1:')
156 156 self.assertEqual(isp.get_indent_spaces(), 4)
157 157 isp.push(' x=1')
158 158 self.assertEqual(isp.get_indent_spaces(), 4)
159 159 # Blank lines shouldn't change the indent level
160 160 isp.push(' '*2)
161 161 self.assertEqual(isp.get_indent_spaces(), 4)
162 162
163 163 def test_indent3(self):
164 164 isp = self.isp
165 165 # When a multiline statement contains parens or multiline strings, we
166 166 # shouldn't get confused.
167 167 isp.push("if 1:")
168 168 isp.push(" x = (1+\n 2)")
169 169 self.assertEqual(isp.get_indent_spaces(), 4)
170 170
171 171 def test_indent4(self):
172 172 isp = self.isp
173 173 # whitespace after ':' should not screw up indent level
174 174 isp.push('if 1: \n x=1')
175 175 self.assertEqual(isp.get_indent_spaces(), 4)
176 176 isp.push('y=2\n')
177 177 self.assertEqual(isp.get_indent_spaces(), 0)
178 178 isp.push('if 1:\t\n x=1')
179 179 self.assertEqual(isp.get_indent_spaces(), 4)
180 180 isp.push('y=2\n')
181 181 self.assertEqual(isp.get_indent_spaces(), 0)
182 182
183 183 def test_dedent_pass(self):
184 184 isp = self.isp # shorthand
185 185 # should NOT cause dedent
186 186 isp.push('if 1:\n passes = 5')
187 187 self.assertEqual(isp.get_indent_spaces(), 4)
188 188 isp.push('if 1:\n pass')
189 189 self.assertEqual(isp.get_indent_spaces(), 0)
190 190 isp.push('if 1:\n pass ')
191 191 self.assertEqual(isp.get_indent_spaces(), 0)
192 192
193 193 def test_dedent_break(self):
194 194 isp = self.isp # shorthand
195 195 # should NOT cause dedent
196 196 isp.push('while 1:\n breaks = 5')
197 197 self.assertEqual(isp.get_indent_spaces(), 4)
198 198 isp.push('while 1:\n break')
199 199 self.assertEqual(isp.get_indent_spaces(), 0)
200 200 isp.push('while 1:\n break ')
201 201 self.assertEqual(isp.get_indent_spaces(), 0)
202 202
203 203 def test_dedent_continue(self):
204 204 isp = self.isp # shorthand
205 205 # should NOT cause dedent
206 206 isp.push('while 1:\n continues = 5')
207 207 self.assertEqual(isp.get_indent_spaces(), 4)
208 208 isp.push('while 1:\n continue')
209 209 self.assertEqual(isp.get_indent_spaces(), 0)
210 210 isp.push('while 1:\n continue ')
211 211 self.assertEqual(isp.get_indent_spaces(), 0)
212 212
213 213 def test_dedent_raise(self):
214 214 isp = self.isp # shorthand
215 215 # should NOT cause dedent
216 216 isp.push('if 1:\n raised = 4')
217 217 self.assertEqual(isp.get_indent_spaces(), 4)
218 218 isp.push('if 1:\n raise TypeError()')
219 219 self.assertEqual(isp.get_indent_spaces(), 0)
220 220 isp.push('if 1:\n raise')
221 221 self.assertEqual(isp.get_indent_spaces(), 0)
222 222 isp.push('if 1:\n raise ')
223 223 self.assertEqual(isp.get_indent_spaces(), 0)
224 224
225 225 def test_dedent_return(self):
226 226 isp = self.isp # shorthand
227 227 # should NOT cause dedent
228 228 isp.push('if 1:\n returning = 4')
229 229 self.assertEqual(isp.get_indent_spaces(), 4)
230 230 isp.push('if 1:\n return 5 + 493')
231 231 self.assertEqual(isp.get_indent_spaces(), 0)
232 232 isp.push('if 1:\n return')
233 233 self.assertEqual(isp.get_indent_spaces(), 0)
234 234 isp.push('if 1:\n return ')
235 235 self.assertEqual(isp.get_indent_spaces(), 0)
236 236 isp.push('if 1:\n return(0)')
237 237 self.assertEqual(isp.get_indent_spaces(), 0)
238 238
239 239 def test_push(self):
240 240 isp = self.isp
241 241 self.assertEqual(isp.push('x=1'), True)
242 242
243 243 def test_push2(self):
244 244 isp = self.isp
245 245 self.assertEqual(isp.push('if 1:'), False)
246 246 for line in [' x=1', '# a comment', ' y=2']:
247 247 print(line)
248 248 self.assertEqual(isp.push(line), True)
249 249
250 250 def test_push3(self):
251 251 isp = self.isp
252 252 isp.push('if True:')
253 253 isp.push(' a = 1')
254 254 self.assertEqual(isp.push('b = [1,'), False)
255 255
256 256 def test_push_accepts_more(self):
257 257 isp = self.isp
258 258 isp.push('x=1')
259 259 self.assertEqual(isp.push_accepts_more(), False)
260 260
261 261 def test_push_accepts_more2(self):
262 262 isp = self.isp
263 263 isp.push('if 1:')
264 264 self.assertEqual(isp.push_accepts_more(), True)
265 265 isp.push(' x=1')
266 266 self.assertEqual(isp.push_accepts_more(), True)
267 267 isp.push('')
268 268 self.assertEqual(isp.push_accepts_more(), False)
269 269
270 270 def test_push_accepts_more3(self):
271 271 isp = self.isp
272 272 isp.push("x = (2+\n3)")
273 273 self.assertEqual(isp.push_accepts_more(), False)
274 274
275 275 def test_push_accepts_more4(self):
276 276 isp = self.isp
277 277 # When a multiline statement contains parens or multiline strings, we
278 278 # shouldn't get confused.
279 279 # FIXME: we should be able to better handle de-dents in statements like
280 280 # multiline strings and multiline expressions (continued with \ or
281 281 # parens). Right now we aren't handling the indentation tracking quite
282 282 # correctly with this, though in practice it may not be too much of a
283 283 # problem. We'll need to see.
284 284 isp.push("if 1:")
285 285 isp.push(" x = (2+")
286 286 isp.push(" 3)")
287 287 self.assertEqual(isp.push_accepts_more(), True)
288 288 isp.push(" y = 3")
289 289 self.assertEqual(isp.push_accepts_more(), True)
290 290 isp.push('')
291 291 self.assertEqual(isp.push_accepts_more(), False)
292 292
293 293 def test_push_accepts_more5(self):
294 294 isp = self.isp
295 295 isp.push('try:')
296 296 isp.push(' a = 5')
297 297 isp.push('except:')
298 298 isp.push(' raise')
299 299 # We want to be able to add an else: block at this point, so it should
300 300 # wait for a blank line.
301 301 self.assertEqual(isp.push_accepts_more(), True)
302 302
303 303 def test_continuation(self):
304 304 isp = self.isp
305 305 isp.push("import os, \\")
306 306 self.assertEqual(isp.push_accepts_more(), True)
307 307 isp.push("sys")
308 308 self.assertEqual(isp.push_accepts_more(), False)
309 309
310 310 def test_syntax_error(self):
311 311 isp = self.isp
312 312 # Syntax errors immediately produce a 'ready' block, so the invalid
313 313 # Python can be sent to the kernel for evaluation with possible ipython
314 314 # special-syntax conversion.
315 315 isp.push('run foo')
316 316 self.assertEqual(isp.push_accepts_more(), False)
317 317
318 318 def test_unicode(self):
319 319 self.isp.push(u"Pérez")
320 320 self.isp.push(u'\xc3\xa9')
321 321 self.isp.push(u"u'\xc3\xa9'")
322 322
323 @pytest.mark.xfail(
324 reason="Bug in python 3.9.8 – bpo 45738",
325 condition=sys.version_info in [(3, 11, 0, "alpha", 2)],
326 raises=SystemError,
327 strict=True,
328 )
329 323 def test_line_continuation(self):
330 324 """ Test issue #2108."""
331 325 isp = self.isp
332 326 # A blank line after a line continuation should not accept more
333 327 isp.push("1 \\\n\n")
334 328 self.assertEqual(isp.push_accepts_more(), False)
335 329 # Whitespace after a \ is a SyntaxError. The only way to test that
336 330 # here is to test that push doesn't accept more (as with
337 331 # test_syntax_error() above).
338 332 isp.push(r"1 \ ")
339 333 self.assertEqual(isp.push_accepts_more(), False)
340 334 # Even if the line is continuable (c.f. the regular Python
341 335 # interpreter)
342 336 isp.push(r"(1 \ ")
343 337 self.assertEqual(isp.push_accepts_more(), False)
344 338
345 339 def test_check_complete(self):
346 340 isp = self.isp
347 341 self.assertEqual(isp.check_complete("a = 1"), ('complete', None))
348 342 self.assertEqual(isp.check_complete("for a in range(5):"), ('incomplete', 4))
349 343 self.assertEqual(isp.check_complete("raise = 2"), ('invalid', None))
350 344 self.assertEqual(isp.check_complete("a = [1,\n2,"), ('incomplete', 0))
351 345 self.assertEqual(isp.check_complete("def a():\n x=1\n global x"), ('invalid', None))
352 346
353 347 class InteractiveLoopTestCase(unittest.TestCase):
354 348 """Tests for an interactive loop like a python shell.
355 349 """
356 350 def check_ns(self, lines, ns):
357 351 """Validate that the given input lines produce the resulting namespace.
358 352
359 353 Note: the input lines are given exactly as they would be typed in an
360 354 auto-indenting environment, as mini_interactive_loop above already does
361 355 auto-indenting and prepends spaces to the input.
362 356 """
363 357 src = mini_interactive_loop(pseudo_input(lines))
364 358 test_ns = {}
365 359 exec(src, test_ns)
366 360 # We can't check that the provided ns is identical to the test_ns,
367 361 # because Python fills test_ns with extra keys (copyright, etc). But
368 362 # we can check that the given dict is *contained* in test_ns
369 363 for k,v in ns.items():
370 364 self.assertEqual(test_ns[k], v)
371 365
372 366 def test_simple(self):
373 367 self.check_ns(['x=1'], dict(x=1))
374 368
375 369 def test_simple2(self):
376 370 self.check_ns(['if 1:', 'x=2'], dict(x=2))
377 371
378 372 def test_xy(self):
379 373 self.check_ns(['x=1; y=2'], dict(x=1, y=2))
380 374
381 375 def test_abc(self):
382 376 self.check_ns(['if 1:','a=1','b=2','c=3'], dict(a=1, b=2, c=3))
383 377
384 378 def test_multi(self):
385 379 self.check_ns(['x =(1+','1+','2)'], dict(x=4))
386 380
387 381
388 382 class IPythonInputTestCase(InputSplitterTestCase):
389 383 """By just creating a new class whose .isp is a different instance, we
390 384 re-run the same test battery on the new input splitter.
391 385
392 386 In addition, this runs the tests over the syntax and syntax_ml dicts that
393 387 were tested by individual functions, as part of the OO interface.
394 388
395 389 It also makes some checks on the raw buffer storage.
396 390 """
397 391
398 392 def setUp(self):
399 393 self.isp = isp.IPythonInputSplitter()
400 394
401 395 def test_syntax(self):
402 396 """Call all single-line syntax tests from the main object"""
403 397 isp = self.isp
404 398 for example in syntax.values():
405 399 for raw, out_t in example:
406 400 if raw.startswith(' '):
407 401 continue
408 402
409 403 isp.push(raw+'\n')
410 404 out_raw = isp.source_raw
411 405 out = isp.source_reset()
412 406 self.assertEqual(out.rstrip(), out_t,
413 407 tt.pair_fail_msg.format("inputsplitter",raw, out_t, out))
414 408 self.assertEqual(out_raw.rstrip(), raw.rstrip())
415 409
416 410 def test_syntax_multiline(self):
417 411 isp = self.isp
418 412 for example in syntax_ml.values():
419 413 for line_pairs in example:
420 414 out_t_parts = []
421 415 raw_parts = []
422 416 for lraw, out_t_part in line_pairs:
423 417 if out_t_part is not None:
424 418 out_t_parts.append(out_t_part)
425 419
426 420 if lraw is not None:
427 421 isp.push(lraw)
428 422 raw_parts.append(lraw)
429 423
430 424 out_raw = isp.source_raw
431 425 out = isp.source_reset()
432 426 out_t = '\n'.join(out_t_parts).rstrip()
433 427 raw = '\n'.join(raw_parts).rstrip()
434 428 self.assertEqual(out.rstrip(), out_t)
435 429 self.assertEqual(out_raw.rstrip(), raw)
436 430
437 431 def test_syntax_multiline_cell(self):
438 432 isp = self.isp
439 433 for example in syntax_ml.values():
440 434
441 435 out_t_parts = []
442 436 for line_pairs in example:
443 437 raw = '\n'.join(r for r, _ in line_pairs if r is not None)
444 438 out_t = '\n'.join(t for _,t in line_pairs if t is not None)
445 439 out = isp.transform_cell(raw)
446 440 # Match ignoring trailing whitespace
447 441 self.assertEqual(out.rstrip(), out_t.rstrip())
448 442
449 443 def test_cellmagic_preempt(self):
450 444 isp = self.isp
451 445 for raw, name, line, cell in [
452 446 ("%%cellm a\nIn[1]:", u'cellm', u'a', u'In[1]:'),
453 447 ("%%cellm \nline\n>>> hi", u'cellm', u'', u'line\n>>> hi'),
454 448 (">>> %%cellm \nline\n>>> hi", u'cellm', u'', u'line\nhi'),
455 449 ("%%cellm \n>>> hi", u'cellm', u'', u'>>> hi'),
456 450 ("%%cellm \nline1\nline2", u'cellm', u'', u'line1\nline2'),
457 451 ("%%cellm \nline1\\\\\nline2", u'cellm', u'', u'line1\\\\\nline2'),
458 452 ]:
459 453 expected = "get_ipython().run_cell_magic(%r, %r, %r)" % (
460 454 name, line, cell
461 455 )
462 456 out = isp.transform_cell(raw)
463 457 self.assertEqual(out.rstrip(), expected.rstrip())
464 458
465 459 def test_multiline_passthrough(self):
466 460 isp = self.isp
467 461 class CommentTransformer(InputTransformer):
468 462 def __init__(self):
469 463 self._lines = []
470 464
471 465 def push(self, line):
472 466 self._lines.append(line + '#')
473 467
474 468 def reset(self):
475 469 text = '\n'.join(self._lines)
476 470 self._lines = []
477 471 return text
478 472
479 473 isp.physical_line_transforms.insert(0, CommentTransformer())
480 474
481 475 for raw, expected in [
482 476 ("a=5", "a=5#"),
483 477 ("%ls foo", "get_ipython().run_line_magic(%r, %r)" % (u'ls', u'foo#')),
484 478 ("!ls foo\n%ls bar", "get_ipython().system(%r)\nget_ipython().run_line_magic(%r, %r)" % (
485 479 u'ls foo#', u'ls', u'bar#'
486 480 )),
487 481 ("1\n2\n3\n%ls foo\n4\n5", "1#\n2#\n3#\nget_ipython().run_line_magic(%r, %r)\n4#\n5#" % (u'ls', u'foo#')),
488 482 ]:
489 483 out = isp.transform_cell(raw)
490 484 self.assertEqual(out.rstrip(), expected.rstrip())
491 485
492 486 #-----------------------------------------------------------------------------
493 487 # Main - use as a script, mostly for developer experiments
494 488 #-----------------------------------------------------------------------------
495 489
496 490 if __name__ == '__main__':
497 491 # A simple demo for interactive experimentation. This code will not get
498 492 # picked up by any test suite.
499 493 from IPython.core.inputsplitter import IPythonInputSplitter
500 494
501 495 # configure here the syntax to use, prompt and whether to autoindent
502 496 #isp, start_prompt = InputSplitter(), '>>> '
503 497 isp, start_prompt = IPythonInputSplitter(), 'In> '
504 498
505 499 autoindent = True
506 500 #autoindent = False
507 501
508 502 try:
509 503 while True:
510 504 prompt = start_prompt
511 505 while isp.push_accepts_more():
512 506 indent = ' '*isp.get_indent_spaces()
513 507 if autoindent:
514 508 line = indent + input(prompt+indent)
515 509 else:
516 510 line = input(prompt)
517 511 isp.push(line)
518 512 prompt = '... '
519 513
520 514 # Here we just return input so we can use it in a test suite, but a
521 515 # real interpreter would instead send it for execution somewhere.
522 516 #src = isp.source; raise EOFError # dbg
523 517 raw = isp.source_raw
524 518 src = isp.source_reset()
525 519 print('Input source was:\n', src)
526 520 print('Raw source was:\n', raw)
527 521 except EOFError:
528 522 print('Bye')
529 523
530 524 # Tests for cell magics support
531 525
532 526 def test_last_blank():
533 527 assert isp.last_blank("") is False
534 528 assert isp.last_blank("abc") is False
535 529 assert isp.last_blank("abc\n") is False
536 530 assert isp.last_blank("abc\na") is False
537 531
538 532 assert isp.last_blank("\n") is True
539 533 assert isp.last_blank("\n ") is True
540 534 assert isp.last_blank("abc\n ") is True
541 535 assert isp.last_blank("abc\n\n") is True
542 536 assert isp.last_blank("abc\nd\n\n") is True
543 537 assert isp.last_blank("abc\nd\ne\n\n") is True
544 538 assert isp.last_blank("abc \n \n \n\n") is True
545 539
546 540
547 541 def test_last_two_blanks():
548 542 assert isp.last_two_blanks("") is False
549 543 assert isp.last_two_blanks("abc") is False
550 544 assert isp.last_two_blanks("abc\n") is False
551 545 assert isp.last_two_blanks("abc\n\na") is False
552 546 assert isp.last_two_blanks("abc\n \n") is False
553 547 assert isp.last_two_blanks("abc\n\n") is False
554 548
555 549 assert isp.last_two_blanks("\n\n") is True
556 550 assert isp.last_two_blanks("\n\n ") is True
557 551 assert isp.last_two_blanks("\n \n") is True
558 552 assert isp.last_two_blanks("abc\n\n ") is True
559 553 assert isp.last_two_blanks("abc\n\n\n") is True
560 554 assert isp.last_two_blanks("abc\n\n \n") is True
561 555 assert isp.last_two_blanks("abc\n\n \n ") is True
562 556 assert isp.last_two_blanks("abc\n\n \n \n") is True
563 557 assert isp.last_two_blanks("abc\nd\n\n\n") is True
564 558 assert isp.last_two_blanks("abc\nd\ne\nf\n\n\n") is True
565 559
566 560
567 561 class CellMagicsCommon(object):
568 562
569 563 def test_whole_cell(self):
570 564 src = "%%cellm line\nbody\n"
571 565 out = self.sp.transform_cell(src)
572 566 ref = "get_ipython().run_cell_magic('cellm', 'line', 'body')\n"
573 567 assert out == ref
574 568
575 569 def test_cellmagic_help(self):
576 570 self.sp.push('%%cellm?')
577 571 assert self.sp.push_accepts_more() is False
578 572
579 573 def tearDown(self):
580 574 self.sp.reset()
581 575
582 576
583 577 class CellModeCellMagics(CellMagicsCommon, unittest.TestCase):
584 578 sp = isp.IPythonInputSplitter(line_input_checker=False)
585 579
586 580 def test_incremental(self):
587 581 sp = self.sp
588 582 sp.push("%%cellm firstline\n")
589 583 assert sp.push_accepts_more() is True # 1
590 584 sp.push("line2\n")
591 585 assert sp.push_accepts_more() is True # 2
592 586 sp.push("\n")
593 587 # This should accept a blank line and carry on until the cell is reset
594 588 assert sp.push_accepts_more() is True # 3
595 589
596 590 def test_no_strip_coding(self):
597 591 src = '\n'.join([
598 592 '%%writefile foo.py',
599 593 '# coding: utf-8',
600 594 'print(u"üñîçø∂é")',
601 595 ])
602 596 out = self.sp.transform_cell(src)
603 597 assert "# coding: utf-8" in out
604 598
605 599
606 600 class LineModeCellMagics(CellMagicsCommon, unittest.TestCase):
607 601 sp = isp.IPythonInputSplitter(line_input_checker=True)
608 602
609 603 def test_incremental(self):
610 604 sp = self.sp
611 605 sp.push("%%cellm line2\n")
612 606 assert sp.push_accepts_more() is True # 1
613 607 sp.push("\n")
614 608 # In this case, a blank line should end the cell magic
615 609 assert sp.push_accepts_more() is False # 2
616 610
617 611
618 612 indentation_samples = [
619 613 ('a = 1', 0),
620 614 ('for a in b:', 4),
621 615 ('def f():', 4),
622 616 ('def f(): #comment', 4),
623 617 ('a = ":#not a comment"', 0),
624 618 ('def f():\n a = 1', 4),
625 619 ('def f():\n return 1', 0),
626 620 ('for a in b:\n'
627 621 ' if a < 0:'
628 622 ' continue', 3),
629 623 ('a = {', 4),
630 624 ('a = {\n'
631 625 ' 1,', 5),
632 626 ('b = """123', 0),
633 627 ('', 0),
634 628 ('def f():\n pass', 0),
635 629 ('class Bar:\n def f():\n pass', 4),
636 630 ('class Bar:\n def f():\n raise', 4),
637 631 ]
638 632
639 633 def test_find_next_indent():
640 634 for code, exp in indentation_samples:
641 635 res = isp.find_next_indent(code)
642 636 msg = "{!r} != {!r} (expected)\n Code: {!r}".format(res, exp, code)
643 637 assert res == exp, msg
@@ -1,448 +1,432
1 1 """Tests for the token-based transformers in IPython.core.inputtransformer2
2 2
3 3 Line-based transformers are the simpler ones; token-based transformers are
4 4 more complex. See test_inputtransformer2_line for tests for line-based
5 5 transformations.
6 6 """
7 7
8 8 import platform
9 9 import string
10 10 import sys
11 11 from textwrap import dedent
12 12
13 13 import pytest
14 14
15 15 from IPython.core import inputtransformer2 as ipt2
16 16 from IPython.core.inputtransformer2 import _find_assign_op, make_tokens_by_line
17 17
18 18 MULTILINE_MAGIC = (
19 19 """\
20 20 a = f()
21 21 %foo \\
22 22 bar
23 23 g()
24 24 """.splitlines(
25 25 keepends=True
26 26 ),
27 27 (2, 0),
28 28 """\
29 29 a = f()
30 30 get_ipython().run_line_magic('foo', ' bar')
31 31 g()
32 32 """.splitlines(
33 33 keepends=True
34 34 ),
35 35 )
36 36
37 37 INDENTED_MAGIC = (
38 38 """\
39 39 for a in range(5):
40 40 %ls
41 41 """.splitlines(
42 42 keepends=True
43 43 ),
44 44 (2, 4),
45 45 """\
46 46 for a in range(5):
47 47 get_ipython().run_line_magic('ls', '')
48 48 """.splitlines(
49 49 keepends=True
50 50 ),
51 51 )
52 52
53 53 CRLF_MAGIC = (
54 54 ["a = f()\n", "%ls\r\n", "g()\n"],
55 55 (2, 0),
56 56 ["a = f()\n", "get_ipython().run_line_magic('ls', '')\n", "g()\n"],
57 57 )
58 58
59 59 MULTILINE_MAGIC_ASSIGN = (
60 60 """\
61 61 a = f()
62 62 b = %foo \\
63 63 bar
64 64 g()
65 65 """.splitlines(
66 66 keepends=True
67 67 ),
68 68 (2, 4),
69 69 """\
70 70 a = f()
71 71 b = get_ipython().run_line_magic('foo', ' bar')
72 72 g()
73 73 """.splitlines(
74 74 keepends=True
75 75 ),
76 76 )
77 77
78 78 MULTILINE_SYSTEM_ASSIGN = ("""\
79 79 a = f()
80 80 b = !foo \\
81 81 bar
82 82 g()
83 83 """.splitlines(keepends=True), (2, 4), """\
84 84 a = f()
85 85 b = get_ipython().getoutput('foo bar')
86 86 g()
87 87 """.splitlines(keepends=True))
88 88
89 89 #####
90 90
91 91 MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT = (
92 92 """\
93 93 def test():
94 94 for i in range(1):
95 95 print(i)
96 96 res =! ls
97 97 """.splitlines(
98 98 keepends=True
99 99 ),
100 100 (4, 7),
101 101 """\
102 102 def test():
103 103 for i in range(1):
104 104 print(i)
105 105 res =get_ipython().getoutput(\' ls\')
106 106 """.splitlines(
107 107 keepends=True
108 108 ),
109 109 )
110 110
111 111 ######
112 112
113 113 AUTOCALL_QUOTE = ([",f 1 2 3\n"], (1, 0), ['f("1", "2", "3")\n'])
114 114
115 115 AUTOCALL_QUOTE2 = ([";f 1 2 3\n"], (1, 0), ['f("1 2 3")\n'])
116 116
117 117 AUTOCALL_PAREN = (["/f 1 2 3\n"], (1, 0), ["f(1, 2, 3)\n"])
118 118
119 119 SIMPLE_HELP = (["foo?\n"], (1, 0), ["get_ipython().run_line_magic('pinfo', 'foo')\n"])
120 120
121 121 DETAILED_HELP = (
122 122 ["foo??\n"],
123 123 (1, 0),
124 124 ["get_ipython().run_line_magic('pinfo2', 'foo')\n"],
125 125 )
126 126
127 127 MAGIC_HELP = (["%foo?\n"], (1, 0), ["get_ipython().run_line_magic('pinfo', '%foo')\n"])
128 128
129 129 HELP_IN_EXPR = (
130 130 ["a = b + c?\n"],
131 131 (1, 0),
132 132 ["get_ipython().run_line_magic('pinfo', 'c')\n"],
133 133 )
134 134
135 135 HELP_CONTINUED_LINE = (
136 136 """\
137 137 a = \\
138 138 zip?
139 139 """.splitlines(
140 140 keepends=True
141 141 ),
142 142 (1, 0),
143 143 [r"get_ipython().run_line_magic('pinfo', 'zip')" + "\n"],
144 144 )
145 145
146 146 HELP_MULTILINE = (
147 147 """\
148 148 (a,
149 149 b) = zip?
150 150 """.splitlines(
151 151 keepends=True
152 152 ),
153 153 (1, 0),
154 154 [r"get_ipython().run_line_magic('pinfo', 'zip')" + "\n"],
155 155 )
156 156
157 157 HELP_UNICODE = (
158 158 ["π.foo?\n"],
159 159 (1, 0),
160 160 ["get_ipython().run_line_magic('pinfo', 'π.foo')\n"],
161 161 )
162 162
163 163
164 164 def null_cleanup_transformer(lines):
165 165 """
166 166 A cleanup transform that returns an empty list.
167 167 """
168 168 return []
169 169
170 170
171 171 def test_check_make_token_by_line_never_ends_empty():
172 172 """
173 173 Check that no sequence of one or two characters ends up producing an empty list of tokens
174 174 """
175 175 from string import printable
176 176
177 177 for c in printable:
178 178 assert make_tokens_by_line(c)[-1] != []
179 179 for k in printable:
180 180 assert make_tokens_by_line(c + k)[-1] != []
181 181
182 182
183 183 def check_find(transformer, case, match=True):
184 184 sample, expected_start, _ = case
185 185 tbl = make_tokens_by_line(sample)
186 186 res = transformer.find(tbl)
187 187 if match:
188 188 # start_line is stored 0-indexed, expected values are 1-indexed
189 189 assert (res.start_line + 1, res.start_col) == expected_start
190 190 return res
191 191 else:
192 192 assert res is None
193 193
194 194
195 195 def check_transform(transformer_cls, case):
196 196 lines, start, expected = case
197 197 transformer = transformer_cls(start)
198 198 assert transformer.transform(lines) == expected
199 199
200 200
201 201 def test_continued_line():
202 202 lines = MULTILINE_MAGIC_ASSIGN[0]
203 203 assert ipt2.find_end_of_continued_line(lines, 1) == 2
204 204
205 205 assert ipt2.assemble_continued_line(lines, (1, 5), 2) == "foo bar"
206 206
207 207
208 208 def test_find_assign_magic():
209 209 check_find(ipt2.MagicAssign, MULTILINE_MAGIC_ASSIGN)
210 210 check_find(ipt2.MagicAssign, MULTILINE_SYSTEM_ASSIGN, match=False)
211 211 check_find(ipt2.MagicAssign, MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT, match=False)
212 212
213 213
214 214 def test_transform_assign_magic():
215 215 check_transform(ipt2.MagicAssign, MULTILINE_MAGIC_ASSIGN)
216 216
217 217
218 218 def test_find_assign_system():
219 219 check_find(ipt2.SystemAssign, MULTILINE_SYSTEM_ASSIGN)
220 220 check_find(ipt2.SystemAssign, MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT)
221 221 check_find(ipt2.SystemAssign, (["a = !ls\n"], (1, 5), None))
222 222 check_find(ipt2.SystemAssign, (["a=!ls\n"], (1, 2), None))
223 223 check_find(ipt2.SystemAssign, MULTILINE_MAGIC_ASSIGN, match=False)
224 224
225 225
226 226 def test_transform_assign_system():
227 227 check_transform(ipt2.SystemAssign, MULTILINE_SYSTEM_ASSIGN)
228 228 check_transform(ipt2.SystemAssign, MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT)
229 229
230 230
231 231 def test_find_magic_escape():
232 232 check_find(ipt2.EscapedCommand, MULTILINE_MAGIC)
233 233 check_find(ipt2.EscapedCommand, INDENTED_MAGIC)
234 234 check_find(ipt2.EscapedCommand, MULTILINE_MAGIC_ASSIGN, match=False)
235 235
236 236
237 237 def test_transform_magic_escape():
238 238 check_transform(ipt2.EscapedCommand, MULTILINE_MAGIC)
239 239 check_transform(ipt2.EscapedCommand, INDENTED_MAGIC)
240 240 check_transform(ipt2.EscapedCommand, CRLF_MAGIC)
241 241
242 242
243 243 def test_find_autocalls():
244 244 for case in [AUTOCALL_QUOTE, AUTOCALL_QUOTE2, AUTOCALL_PAREN]:
245 245 print("Testing %r" % case[0])
246 246 check_find(ipt2.EscapedCommand, case)
247 247
248 248
249 249 def test_transform_autocall():
250 250 for case in [AUTOCALL_QUOTE, AUTOCALL_QUOTE2, AUTOCALL_PAREN]:
251 251 print("Testing %r" % case[0])
252 252 check_transform(ipt2.EscapedCommand, case)
253 253
254 254
255 255 def test_find_help():
256 256 for case in [SIMPLE_HELP, DETAILED_HELP, MAGIC_HELP, HELP_IN_EXPR]:
257 257 check_find(ipt2.HelpEnd, case)
258 258
259 259 tf = check_find(ipt2.HelpEnd, HELP_CONTINUED_LINE)
260 260 assert tf.q_line == 1
261 261 assert tf.q_col == 3
262 262
263 263 tf = check_find(ipt2.HelpEnd, HELP_MULTILINE)
264 264 assert tf.q_line == 1
265 265 assert tf.q_col == 8
266 266
267 267 # ? in a comment does not trigger help
268 268 check_find(ipt2.HelpEnd, (["foo # bar?\n"], None, None), match=False)
269 269 # Nor in a string
270 270 check_find(ipt2.HelpEnd, (["foo = '''bar?\n"], None, None), match=False)
271 271
272 272
273 273 def test_transform_help():
274 274 tf = ipt2.HelpEnd((1, 0), (1, 9))
275 275 assert tf.transform(HELP_IN_EXPR[0]) == HELP_IN_EXPR[2]
276 276
277 277 tf = ipt2.HelpEnd((1, 0), (2, 3))
278 278 assert tf.transform(HELP_CONTINUED_LINE[0]) == HELP_CONTINUED_LINE[2]
279 279
280 280 tf = ipt2.HelpEnd((1, 0), (2, 8))
281 281 assert tf.transform(HELP_MULTILINE[0]) == HELP_MULTILINE[2]
282 282
283 283 tf = ipt2.HelpEnd((1, 0), (1, 0))
284 284 assert tf.transform(HELP_UNICODE[0]) == HELP_UNICODE[2]
285 285
286 286
287 287 def test_find_assign_op_dedent():
288 288 """
289 289 be careful that empty tokens like dedent are not counted as parens
290 290 """
291 291
292 292 class Tk:
293 293 def __init__(self, s):
294 294 self.string = s
295 295
296 296 assert _find_assign_op([Tk(s) for s in ("", "a", "=", "b")]) == 2
297 297 assert (
298 298 _find_assign_op([Tk(s) for s in ("", "(", "a", "=", "b", ")", "=", "5")]) == 6
299 299 )
300 300
301 301
302 302 extra_closing_paren_param = (
303 303 pytest.param("(\n))", "invalid", None)
304 304 if sys.version_info >= (3, 12)
305 305 else pytest.param("(\n))", "incomplete", 0)
306 306 )
307 307 examples = [
308 308 pytest.param("a = 1", "complete", None),
309 309 pytest.param("for a in range(5):", "incomplete", 4),
310 310 pytest.param("for a in range(5):\n if a > 0:", "incomplete", 8),
311 311 pytest.param("raise = 2", "invalid", None),
312 312 pytest.param("a = [1,\n2,", "incomplete", 0),
313 313 extra_closing_paren_param,
314 314 pytest.param("\\\r\n", "incomplete", 0),
315 315 pytest.param("a = '''\n hi", "incomplete", 3),
316 316 pytest.param("def a():\n x=1\n global x", "invalid", None),
317 pytest.param(
318 "a \\ ",
319 "invalid",
320 None,
321 marks=pytest.mark.xfail(
322 reason="Bug in python 3.9.8 – bpo 45738",
323 condition=sys.version_info in [(3, 11, 0, "alpha", 2)],
324 raises=SystemError,
325 strict=True,
326 ),
327 ), # Nothing allowed after backslash,
317 pytest.param("a \\ ", "invalid", None), # Nothing allowed after backslash,
328 318 pytest.param("1\\\n+2", "complete", None),
329 319 ]
330 320
331 321
332 322 @pytest.mark.parametrize("code, expected, number", examples)
333 323 def test_check_complete_param(code, expected, number):
334 324 cc = ipt2.TransformerManager().check_complete
335 325 assert cc(code) == (expected, number)
336 326
337 327
338 328 @pytest.mark.xfail(platform.python_implementation() == "PyPy", reason="fail on pypy")
339 @pytest.mark.xfail(
340 reason="Bug in python 3.9.8 – bpo 45738",
341 condition=sys.version_info in [(3, 11, 0, "alpha", 2)],
342 raises=SystemError,
343 strict=True,
344 )
345 329 def test_check_complete():
346 330 cc = ipt2.TransformerManager().check_complete
347 331
348 332 example = dedent(
349 333 """
350 334 if True:
351 335 a=1"""
352 336 )
353 337
354 338 assert cc(example) == ("incomplete", 4)
355 339 assert cc(example + "\n") == ("complete", None)
356 340 assert cc(example + "\n ") == ("complete", None)
357 341
358 342 # no need to loop on all the letters/numbers.
359 343 short = "12abAB" + string.printable[62:]
360 344 for c in short:
361 345 # test does not raise:
362 346 cc(c)
363 347 for k in short:
364 348 cc(c + k)
365 349
366 350 assert cc("def f():\n x=0\n \\\n ") == ("incomplete", 2)
367 351
368 352
369 353 @pytest.mark.xfail(platform.python_implementation() == "PyPy", reason="fail on pypy")
370 354 @pytest.mark.parametrize(
371 355 "value, expected",
372 356 [
373 357 ('''def foo():\n """''', ("incomplete", 4)),
374 358 ("""async with example:\n pass""", ("incomplete", 4)),
375 359 ("""async with example:\n pass\n """, ("complete", None)),
376 360 ],
377 361 )
378 362 def test_check_complete_II(value, expected):
379 363 """
380 364 Test that multi-line strings are properly handled.
381 365
382 366 Separate test function for convenience.
383 367
384 368 """
385 369 cc = ipt2.TransformerManager().check_complete
386 370 assert cc(value) == expected
387 371
388 372
389 373 @pytest.mark.parametrize(
390 374 "value, expected",
391 375 [
392 376 (")", ("invalid", None)),
393 377 ("]", ("invalid", None)),
394 378 ("}", ("invalid", None)),
395 379 (")(", ("invalid", None)),
396 380 ("][", ("invalid", None)),
397 381 ("}{", ("invalid", None)),
398 382 ("]()(", ("invalid", None)),
399 383 ("())(", ("invalid", None)),
400 384 (")[](", ("invalid", None)),
401 385 ("()](", ("invalid", None)),
402 386 ],
403 387 )
404 388 def test_check_complete_invalidates_sunken_brackets(value, expected):
405 389 """
406 390 Test that a single line with more closing brackets than the opening ones is
407 391 interpreted as invalid
408 392 """
409 393 cc = ipt2.TransformerManager().check_complete
410 394 assert cc(value) == expected
411 395
412 396
413 397 def test_null_cleanup_transformer():
414 398 manager = ipt2.TransformerManager()
415 399 manager.cleanup_transforms.insert(0, null_cleanup_transformer)
416 400 assert manager.transform_cell("") == ""
417 401
418 402
419 403 def test_side_effects_I():
420 404 count = 0
421 405
422 406 def counter(lines):
423 407 nonlocal count
424 408 count += 1
425 409 return lines
426 410
427 411 counter.has_side_effects = True
428 412
429 413 manager = ipt2.TransformerManager()
430 414 manager.cleanup_transforms.insert(0, counter)
431 415 assert manager.check_complete("a=1\n") == ("complete", None)
432 416 assert count == 0
433 417
434 418
435 419 def test_side_effects_II():
436 420 count = 0
437 421
438 422 def counter(lines):
439 423 nonlocal count
440 424 count += 1
441 425 return lines
442 426
443 427 counter.has_side_effects = True
444 428
445 429 manager = ipt2.TransformerManager()
446 430 manager.line_transforms.insert(0, counter)
447 431 assert manager.check_complete("b=1\n") == ("complete", None)
448 432 assert count == 0
@@ -1,142 +1,137
1 1 #-----------------------------------------------------------------------------
2 2 # Copyright (C) 2010-2011, IPython Development Team.
3 3 #
4 4 # Distributed under the terms of the Modified BSD License.
5 5 #
6 6 # The full license is in the file COPYING.txt, distributed with this software.
7 7 #-----------------------------------------------------------------------------
8 8
9 9 import argparse
10 10 import sys
11 11
12 12 from IPython.core.magic_arguments import (argument, argument_group, kwds,
13 13 magic_arguments, parse_argstring, real_name)
14 14
15 15 LEADING_SPACE = "" if sys.version_info > (3, 13) else " "
16 16
17 17
18 18 @magic_arguments()
19 19 @argument('-f', '--foo', help="an argument")
20 20 def magic_foo1(self, args):
21 21 """ A docstring.
22 22 """
23 23 return parse_argstring(magic_foo1, args)
24 24
25 25
26 26 @magic_arguments()
27 27 def magic_foo2(self, args):
28 28 """ A docstring.
29 29 """
30 30 return parse_argstring(magic_foo2, args)
31 31
32 32
33 33 @magic_arguments()
34 34 @argument('-f', '--foo', help="an argument")
35 35 @argument_group('Group')
36 36 @argument('-b', '--bar', help="a grouped argument")
37 37 @argument_group('Second Group')
38 38 @argument('-z', '--baz', help="another grouped argument")
39 39 def magic_foo3(self, args):
40 40 """ A docstring.
41 41 """
42 42 return parse_argstring(magic_foo3, args)
43 43
44 44
45 45 @magic_arguments()
46 46 @kwds(argument_default=argparse.SUPPRESS)
47 47 @argument('-f', '--foo', help="an argument")
48 48 def magic_foo4(self, args):
49 49 """ A docstring.
50 50 """
51 51 return parse_argstring(magic_foo4, args)
52 52
53 53
54 54 @magic_arguments('frobnicate')
55 55 @argument('-f', '--foo', help="an argument")
56 56 def magic_foo5(self, args):
57 57 """ A docstring.
58 58 """
59 59 return parse_argstring(magic_foo5, args)
60 60
61 61
62 62 @magic_arguments()
63 63 @argument('-f', '--foo', help="an argument")
64 64 def magic_magic_foo(self, args):
65 65 """ A docstring.
66 66 """
67 67 return parse_argstring(magic_magic_foo, args)
68 68
69 69
70 70 @magic_arguments()
71 71 @argument('-f', '--foo', help="an argument")
72 72 def foo(self, args):
73 73 """ A docstring.
74 74 """
75 75 return parse_argstring(foo, args)
76 76
77 77
78 78 def test_magic_arguments():
79 # “optional arguments” was replaced with “options” in argparse help
80 # https://docs.python.org/3/whatsnew/3.10.html#argparse
81 # https://bugs.python.org/issue9694
82 options = "optional arguments" if sys.version_info < (3, 10) else "options"
83
84 79 assert (
85 80 magic_foo1.__doc__
86 == f"::\n\n %foo1 [-f FOO]\n\n{LEADING_SPACE}A docstring.\n\n{options}:\n -f FOO, --foo FOO an argument\n"
81 == f"::\n\n %foo1 [-f FOO]\n\n{LEADING_SPACE}A docstring.\n\noptions:\n -f FOO, --foo FOO an argument\n"
87 82 )
88 83 assert getattr(magic_foo1, "argcmd_name", None) == None
89 84 assert real_name(magic_foo1) == "foo1"
90 85 assert magic_foo1(None, "") == argparse.Namespace(foo=None)
91 86 assert hasattr(magic_foo1, "has_arguments")
92 87
93 88 assert magic_foo2.__doc__ == f"::\n\n %foo2\n\n{LEADING_SPACE}A docstring.\n"
94 89 assert getattr(magic_foo2, "argcmd_name", None) == None
95 90 assert real_name(magic_foo2) == "foo2"
96 91 assert magic_foo2(None, "") == argparse.Namespace()
97 92 assert hasattr(magic_foo2, "has_arguments")
98 93
99 94 assert (
100 95 magic_foo3.__doc__
101 == f"::\n\n %foo3 [-f FOO] [-b BAR] [-z BAZ]\n\n{LEADING_SPACE}A docstring.\n\n{options}:\n -f FOO, --foo FOO an argument\n\nGroup:\n -b BAR, --bar BAR a grouped argument\n\nSecond Group:\n -z BAZ, --baz BAZ another grouped argument\n"
96 == f"::\n\n %foo3 [-f FOO] [-b BAR] [-z BAZ]\n\n{LEADING_SPACE}A docstring.\n\noptions:\n -f FOO, --foo FOO an argument\n\nGroup:\n -b BAR, --bar BAR a grouped argument\n\nSecond Group:\n -z BAZ, --baz BAZ another grouped argument\n"
102 97 )
103 98 assert getattr(magic_foo3, "argcmd_name", None) == None
104 99 assert real_name(magic_foo3) == "foo3"
105 100 assert magic_foo3(None, "") == argparse.Namespace(bar=None, baz=None, foo=None)
106 101 assert hasattr(magic_foo3, "has_arguments")
107 102
108 103 assert (
109 104 magic_foo4.__doc__
110 == f"::\n\n %foo4 [-f FOO]\n\n{LEADING_SPACE}A docstring.\n\n{options}:\n -f FOO, --foo FOO an argument\n"
105 == f"::\n\n %foo4 [-f FOO]\n\n{LEADING_SPACE}A docstring.\n\noptions:\n -f FOO, --foo FOO an argument\n"
111 106 )
112 107 assert getattr(magic_foo4, "argcmd_name", None) == None
113 108 assert real_name(magic_foo4) == "foo4"
114 109 assert magic_foo4(None, "") == argparse.Namespace()
115 110 assert hasattr(magic_foo4, "has_arguments")
116 111
117 112 assert (
118 113 magic_foo5.__doc__
119 == f"::\n\n %frobnicate [-f FOO]\n\n{LEADING_SPACE}A docstring.\n\n{options}:\n -f FOO, --foo FOO an argument\n"
114 == f"::\n\n %frobnicate [-f FOO]\n\n{LEADING_SPACE}A docstring.\n\noptions:\n -f FOO, --foo FOO an argument\n"
120 115 )
121 116 assert getattr(magic_foo5, "argcmd_name", None) == "frobnicate"
122 117 assert real_name(magic_foo5) == "frobnicate"
123 118 assert magic_foo5(None, "") == argparse.Namespace(foo=None)
124 119 assert hasattr(magic_foo5, "has_arguments")
125 120
126 121 assert (
127 122 magic_magic_foo.__doc__
128 == f"::\n\n %magic_foo [-f FOO]\n\n{LEADING_SPACE}A docstring.\n\n{options}:\n -f FOO, --foo FOO an argument\n"
123 == f"::\n\n %magic_foo [-f FOO]\n\n{LEADING_SPACE}A docstring.\n\noptions:\n -f FOO, --foo FOO an argument\n"
129 124 )
130 125 assert getattr(magic_magic_foo, "argcmd_name", None) == None
131 126 assert real_name(magic_magic_foo) == "magic_foo"
132 127 assert magic_magic_foo(None, "") == argparse.Namespace(foo=None)
133 128 assert hasattr(magic_magic_foo, "has_arguments")
134 129
135 130 assert (
136 131 foo.__doc__
137 == f"::\n\n %foo [-f FOO]\n\n{LEADING_SPACE}A docstring.\n\n{options}:\n -f FOO, --foo FOO an argument\n"
132 == f"::\n\n %foo [-f FOO]\n\n{LEADING_SPACE}A docstring.\n\noptions:\n -f FOO, --foo FOO an argument\n"
138 133 )
139 134 assert getattr(foo, "argcmd_name", None) == None
140 135 assert real_name(foo) == "foo"
141 136 assert foo(None, "") == argparse.Namespace(foo=None)
142 137 assert hasattr(foo, "has_arguments")
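
The docstrings asserted in test_magic_arguments above are generated by the decorators in IPython.core.magic_arguments, which build an argparse parser behind the scenes. A minimal sketch in the same style as the fixtures above (the magic_repeat name and its -n/--count option are illustrative, not from the diff):

from argparse import Namespace
from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring

@magic_arguments()
@argument("-n", "--count", type=int, default=1, help="how many times to repeat")
def magic_repeat(self, args):
    """A docstring."""
    # parse_argstring turns the magic's argument string into an argparse Namespace
    return parse_argstring(magic_repeat, args)

assert magic_repeat(None, "-n 3") == Namespace(count=3)
assert magic_repeat(None, "") == Namespace(count=1)
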
@@ -1,540 +1,538
1 1 # coding: utf-8
2 2 """Tests for IPython.lib.pretty."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7
8 8 from collections import Counter, defaultdict, deque, OrderedDict, UserList
9 9 import os
10 10 import pytest
11 11 import types
12 12 import string
13 13 import sys
14 14 import unittest
15 15
16 16 import pytest
17 17
18 18 from IPython.lib import pretty
19 19
20 20 from io import StringIO
21 21
22 22
23 23 class MyList(object):
24 24 def __init__(self, content):
25 25 self.content = content
26 26 def _repr_pretty_(self, p, cycle):
27 27 if cycle:
28 28 p.text("MyList(...)")
29 29 else:
30 30 with p.group(3, "MyList(", ")"):
31 31 for (i, child) in enumerate(self.content):
32 32 if i:
33 33 p.text(",")
34 34 p.breakable()
35 35 else:
36 36 p.breakable("")
37 37 p.pretty(child)
38 38
39 39
40 40 class MyDict(dict):
41 41 def _repr_pretty_(self, p, cycle):
42 42 p.text("MyDict(...)")
43 43
44 44 class MyObj(object):
45 45 def somemethod(self):
46 46 pass
47 47
48 48
49 49 class Dummy1(object):
50 50 def _repr_pretty_(self, p, cycle):
51 51 p.text("Dummy1(...)")
52 52
53 53 class Dummy2(Dummy1):
54 54 _repr_pretty_ = None
55 55
56 56 class NoModule(object):
57 57 pass
58 58
59 59 NoModule.__module__ = None
60 60
61 61 class Breaking(object):
62 62 def _repr_pretty_(self, p, cycle):
63 63 with p.group(4,"TG: ",":"):
64 64 p.text("Breaking(")
65 65 p.break_()
66 66 p.text(")")
67 67
68 68 class BreakingRepr(object):
69 69 def __repr__(self):
70 70 return "Breaking(\n)"
71 71
72 72 class BadRepr(object):
73 73 def __repr__(self):
74 74 return 1/0
75 75
76 76
77 77 def test_indentation():
78 78 """Test correct indentation in groups"""
79 79 count = 40
80 80 gotoutput = pretty.pretty(MyList(range(count)))
81 81 expectedoutput = "MyList(\n" + ",\n".join(" %d" % i for i in range(count)) + ")"
82 82
83 83 assert gotoutput == expectedoutput
84 84
85 85
86 86 def test_dispatch():
87 87 """
88 88 Test correct dispatching: The _repr_pretty_ method for MyDict
89 89 must be found before the registered printer for dict.
90 90 """
91 91 gotoutput = pretty.pretty(MyDict())
92 92 expectedoutput = "MyDict(...)"
93 93
94 94 assert gotoutput == expectedoutput
95 95
96 96
97 97 def test_callability_checking():
98 98 """
99 99 Test that the _repr_pretty_ method is tested for callability and skipped if
100 100 not.
101 101 """
102 102 gotoutput = pretty.pretty(Dummy2())
103 103 expectedoutput = "Dummy1(...)"
104 104
105 105 assert gotoutput == expectedoutput
106 106
107 107
108 108 @pytest.mark.parametrize(
109 109 "obj,expected_output",
110 110 zip(
111 111 [
112 112 set(),
113 113 frozenset(),
114 114 set([1]),
115 115 frozenset([1]),
116 116 set([1, 2]),
117 117 frozenset([1, 2]),
118 118 set([-1, -2, -3]),
119 119 ],
120 120 [
121 121 "set()",
122 122 "frozenset()",
123 123 "{1}",
124 124 "frozenset({1})",
125 125 "{1, 2}",
126 126 "frozenset({1, 2})",
127 127 "{-3, -2, -1}",
128 128 ],
129 129 ),
130 130 )
131 131 def test_sets(obj, expected_output):
132 132 """
133 133 Test that set and frozenset use Python 3 formatting.
134 134 """
135 135 got_output = pretty.pretty(obj)
136 136 assert got_output == expected_output
137 137
138 138
139 139 def test_pprint_heap_allocated_type():
140 140 """
141 141 Test that pprint works for heap allocated types.
142 142 """
143 143 module_name = "xxlimited_35"
144 expected_output = (
145 "xxlimited.Null" if sys.version_info < (3, 10, 6) else "xxlimited_35.Null"
146 )
144 expected_output = "xxlimited_35.Null"
147 145 xxlimited = pytest.importorskip(module_name)
148 146 output = pretty.pretty(xxlimited.Null)
149 147 assert output == expected_output
150 148
151 149
152 150 def test_pprint_nomod():
153 151 """
154 152 Test that pprint works for classes with no __module__.
155 153 """
156 154 output = pretty.pretty(NoModule)
157 155 assert output == "NoModule"
158 156
159 157
160 158 def test_pprint_break():
161 159 """
162 160 Test that p.break_ produces expected output
163 161 """
164 162 output = pretty.pretty(Breaking())
165 163 expected = "TG: Breaking(\n ):"
166 164 assert output == expected
167 165
168 166 def test_pprint_break_repr():
169 167 """
170 168 Test that p.break_ is used in repr
171 169 """
172 170 output = pretty.pretty([[BreakingRepr()]])
173 171 expected = "[[Breaking(\n )]]"
174 172 assert output == expected
175 173
176 174 output = pretty.pretty([[BreakingRepr()]*2])
177 175 expected = "[[Breaking(\n ),\n Breaking(\n )]]"
178 176 assert output == expected
179 177
180 178 def test_bad_repr():
181 179 """Don't catch bad repr errors"""
182 180 with pytest.raises(ZeroDivisionError):
183 181 pretty.pretty(BadRepr())
184 182
185 183 class BadException(Exception):
186 184 def __str__(self):
187 185 return -1
188 186
189 187 class ReallyBadRepr(object):
190 188 __module__ = 1
191 189 @property
192 190 def __class__(self):
193 191 raise ValueError("I am horrible")
194 192
195 193 def __repr__(self):
196 194 raise BadException()
197 195
198 196 def test_really_bad_repr():
199 197 with pytest.raises(BadException):
200 198 pretty.pretty(ReallyBadRepr())
201 199
202 200
203 201 class SA(object):
204 202 pass
205 203
206 204 class SB(SA):
207 205 pass
208 206
209 207 class TestsPretty(unittest.TestCase):
210 208
211 209 def test_super_repr(self):
212 210 # "<super: module_name.SA, None>"
213 211 output = pretty.pretty(super(SA))
214 212 self.assertRegex(output, r"<super: \S+.SA, None>")
215 213
216 214 # "<super: module_name.SA, <module_name.SB at 0x...>>"
217 215 sb = SB()
218 216 output = pretty.pretty(super(SA, sb))
219 217 self.assertRegex(output, r"<super: \S+.SA,\s+<\S+.SB at 0x\S+>>")
220 218
221 219
222 220 def test_long_list(self):
223 221 lis = list(range(10000))
224 222 p = pretty.pretty(lis)
225 223 last2 = p.rsplit('\n', 2)[-2:]
226 224 self.assertEqual(last2, [' 999,', ' ...]'])
227 225
228 226 def test_long_set(self):
229 227 s = set(range(10000))
230 228 p = pretty.pretty(s)
231 229 last2 = p.rsplit('\n', 2)[-2:]
232 230 self.assertEqual(last2, [' 999,', ' ...}'])
233 231
234 232 def test_long_tuple(self):
235 233 tup = tuple(range(10000))
236 234 p = pretty.pretty(tup)
237 235 last2 = p.rsplit('\n', 2)[-2:]
238 236 self.assertEqual(last2, [' 999,', ' ...)'])
239 237
240 238 def test_long_dict(self):
241 239 d = { n:n for n in range(10000) }
242 240 p = pretty.pretty(d)
243 241 last2 = p.rsplit('\n', 2)[-2:]
244 242 self.assertEqual(last2, [' 999: 999,', ' ...}'])
245 243
246 244 def test_unbound_method(self):
247 245 output = pretty.pretty(MyObj.somemethod)
248 246 self.assertIn('MyObj.somemethod', output)
249 247
250 248
251 249 class MetaClass(type):
252 250 def __new__(cls, name):
253 251 return type.__new__(cls, name, (object,), {'name': name})
254 252
255 253 def __repr__(self):
256 254 return "[CUSTOM REPR FOR CLASS %s]" % self.name
257 255
258 256
259 257 ClassWithMeta = MetaClass('ClassWithMeta')
260 258
261 259
262 260 def test_metaclass_repr():
263 261 output = pretty.pretty(ClassWithMeta)
264 262 assert output == "[CUSTOM REPR FOR CLASS ClassWithMeta]"
265 263
266 264
267 265 def test_unicode_repr():
268 266 u = u"üniçodé"
269 267 ustr = u
270 268
271 269 class C(object):
272 270 def __repr__(self):
273 271 return ustr
274 272
275 273 c = C()
276 274 p = pretty.pretty(c)
277 275 assert p == u
278 276 p = pretty.pretty([c])
279 277 assert p == "[%s]" % u
280 278
281 279
282 280 def test_basic_class():
283 281 def type_pprint_wrapper(obj, p, cycle):
284 282 if obj is MyObj:
285 283 type_pprint_wrapper.called = True
286 284 return pretty._type_pprint(obj, p, cycle)
287 285 type_pprint_wrapper.called = False
288 286
289 287 stream = StringIO()
290 288 printer = pretty.RepresentationPrinter(stream)
291 289 printer.type_pprinters[type] = type_pprint_wrapper
292 290 printer.pretty(MyObj)
293 291 printer.flush()
294 292 output = stream.getvalue()
295 293
296 294 assert output == "%s.MyObj" % __name__
297 295 assert type_pprint_wrapper.called is True
298 296
299 297
300 298 def test_collections_userlist():
301 299 # Create userlist with cycle
302 300 a = UserList()
303 301 a.append(a)
304 302
305 303 cases = [
306 304 (UserList(), "UserList([])"),
307 305 (
308 306 UserList(i for i in range(1000, 1020)),
309 307 "UserList([1000,\n"
310 308 " 1001,\n"
311 309 " 1002,\n"
312 310 " 1003,\n"
313 311 " 1004,\n"
314 312 " 1005,\n"
315 313 " 1006,\n"
316 314 " 1007,\n"
317 315 " 1008,\n"
318 316 " 1009,\n"
319 317 " 1010,\n"
320 318 " 1011,\n"
321 319 " 1012,\n"
322 320 " 1013,\n"
323 321 " 1014,\n"
324 322 " 1015,\n"
325 323 " 1016,\n"
326 324 " 1017,\n"
327 325 " 1018,\n"
328 326 " 1019])",
329 327 ),
330 328 (a, "UserList([UserList(...)])"),
331 329 ]
332 330 for obj, expected in cases:
333 331 assert pretty.pretty(obj) == expected
334 332
335 333
336 334 # TODO : pytest.mark.parametrise once nose is gone.
337 335 def test_collections_defaultdict():
338 336 # Create defaultdicts with cycles
339 337 a = defaultdict()
340 338 a.default_factory = a
341 339 b = defaultdict(list)
342 340 b['key'] = b
343 341
344 342 # Dictionary order cannot be relied on, test against single keys.
345 343 cases = [
346 344 (defaultdict(list), 'defaultdict(list, {})'),
347 345 (defaultdict(list, {'key': '-' * 50}),
348 346 "defaultdict(list,\n"
349 347 " {'key': '--------------------------------------------------'})"),
350 348 (a, 'defaultdict(defaultdict(...), {})'),
351 349 (b, "defaultdict(list, {'key': defaultdict(...)})"),
352 350 ]
353 351 for obj, expected in cases:
354 352 assert pretty.pretty(obj) == expected
355 353
356 354
357 355 # TODO : pytest.mark.parametrise once nose is gone.
358 356 def test_collections_ordereddict():
359 357 # Create OrderedDict with cycle
360 358 a = OrderedDict()
361 359 a['key'] = a
362 360
363 361 cases = [
364 362 (OrderedDict(), 'OrderedDict()'),
365 363 (OrderedDict((i, i) for i in range(1000, 1010)),
366 364 'OrderedDict([(1000, 1000),\n'
367 365 ' (1001, 1001),\n'
368 366 ' (1002, 1002),\n'
369 367 ' (1003, 1003),\n'
370 368 ' (1004, 1004),\n'
371 369 ' (1005, 1005),\n'
372 370 ' (1006, 1006),\n'
373 371 ' (1007, 1007),\n'
374 372 ' (1008, 1008),\n'
375 373 ' (1009, 1009)])'),
376 374 (a, "OrderedDict([('key', OrderedDict(...))])"),
377 375 ]
378 376 for obj, expected in cases:
379 377 assert pretty.pretty(obj) == expected
380 378
381 379
382 380 # TODO : pytest.mark.parametrise once nose is gone.
383 381 def test_collections_deque():
384 382 # Create deque with cycle
385 383 a = deque()
386 384 a.append(a)
387 385
388 386 cases = [
389 387 (deque(), 'deque([])'),
390 388 (deque(i for i in range(1000, 1020)),
391 389 'deque([1000,\n'
392 390 ' 1001,\n'
393 391 ' 1002,\n'
394 392 ' 1003,\n'
395 393 ' 1004,\n'
396 394 ' 1005,\n'
397 395 ' 1006,\n'
398 396 ' 1007,\n'
399 397 ' 1008,\n'
400 398 ' 1009,\n'
401 399 ' 1010,\n'
402 400 ' 1011,\n'
403 401 ' 1012,\n'
404 402 ' 1013,\n'
405 403 ' 1014,\n'
406 404 ' 1015,\n'
407 405 ' 1016,\n'
408 406 ' 1017,\n'
409 407 ' 1018,\n'
410 408 ' 1019])'),
411 409 (a, 'deque([deque(...)])'),
412 410 ]
413 411 for obj, expected in cases:
414 412 assert pretty.pretty(obj) == expected
415 413
416 414
417 415 # TODO : pytest.mark.parametrise once nose is gone.
418 416 def test_collections_counter():
419 417 class MyCounter(Counter):
420 418 pass
421 419 cases = [
422 420 (Counter(), 'Counter()'),
423 421 (Counter(a=1), "Counter({'a': 1})"),
424 422 (MyCounter(a=1), "MyCounter({'a': 1})"),
425 423 (Counter(a=1, c=22), "Counter({'c': 22, 'a': 1})"),
426 424 ]
427 425 for obj, expected in cases:
428 426 assert pretty.pretty(obj) == expected
429 427
430 428 # TODO : pytest.mark.parametrise once nose is gone.
431 429 def test_mappingproxy():
432 430 MP = types.MappingProxyType
433 431 underlying_dict = {}
434 432 mp_recursive = MP(underlying_dict)
435 433 underlying_dict[2] = mp_recursive
436 434 underlying_dict[3] = underlying_dict
437 435
438 436 cases = [
439 437 (MP({}), "mappingproxy({})"),
440 438 (MP({None: MP({})}), "mappingproxy({None: mappingproxy({})})"),
441 439 (MP({k: k.upper() for k in string.ascii_lowercase}),
442 440 "mappingproxy({'a': 'A',\n"
443 441 " 'b': 'B',\n"
444 442 " 'c': 'C',\n"
445 443 " 'd': 'D',\n"
446 444 " 'e': 'E',\n"
447 445 " 'f': 'F',\n"
448 446 " 'g': 'G',\n"
449 447 " 'h': 'H',\n"
450 448 " 'i': 'I',\n"
451 449 " 'j': 'J',\n"
452 450 " 'k': 'K',\n"
453 451 " 'l': 'L',\n"
454 452 " 'm': 'M',\n"
455 453 " 'n': 'N',\n"
456 454 " 'o': 'O',\n"
457 455 " 'p': 'P',\n"
458 456 " 'q': 'Q',\n"
459 457 " 'r': 'R',\n"
460 458 " 's': 'S',\n"
461 459 " 't': 'T',\n"
462 460 " 'u': 'U',\n"
463 461 " 'v': 'V',\n"
464 462 " 'w': 'W',\n"
465 463 " 'x': 'X',\n"
466 464 " 'y': 'Y',\n"
467 465 " 'z': 'Z'})"),
468 466 (mp_recursive, "mappingproxy({2: {...}, 3: {2: {...}, 3: {...}}})"),
469 467 (underlying_dict,
470 468 "{2: mappingproxy({2: {...}, 3: {...}}), 3: {...}}"),
471 469 ]
472 470 for obj, expected in cases:
473 471 assert pretty.pretty(obj) == expected
474 472
475 473
476 474 # TODO : pytest.mark.parametrise once nose is gone.
477 475 def test_simplenamespace():
478 476 SN = types.SimpleNamespace
479 477
480 478 sn_recursive = SN()
481 479 sn_recursive.first = sn_recursive
482 480 sn_recursive.second = sn_recursive
483 481 cases = [
484 482 (SN(), "namespace()"),
485 483 (SN(x=SN()), "namespace(x=namespace())"),
486 484 (SN(a_long_name=[SN(s=string.ascii_lowercase)]*3, a_short_name=None),
487 485 "namespace(a_long_name=[namespace(s='abcdefghijklmnopqrstuvwxyz'),\n"
488 486 " namespace(s='abcdefghijklmnopqrstuvwxyz'),\n"
489 487 " namespace(s='abcdefghijklmnopqrstuvwxyz')],\n"
490 488 " a_short_name=None)"),
491 489 (sn_recursive, "namespace(first=namespace(...), second=namespace(...))"),
492 490 ]
493 491 for obj, expected in cases:
494 492 assert pretty.pretty(obj) == expected
495 493
496 494
497 495 def test_pretty_environ():
498 496 dict_repr = pretty.pretty(dict(os.environ))
499 497 # reindent to align with 'environ' prefix
500 498 dict_indented = dict_repr.replace('\n', '\n' + (' ' * len('environ')))
501 499 env_repr = pretty.pretty(os.environ)
502 500 assert env_repr == "environ" + dict_indented
503 501
504 502
505 503 def test_function_pretty():
506 504 "Test pretty print of function"
507 505 # posixpath is a pure python module, its interface is consistent
508 506 # across Python distributions
509 507 import posixpath
510 508
511 509 assert pretty.pretty(posixpath.join) == "<function posixpath.join(a, *p)>"
512 510
513 511 # custom function
514 512 def meaning_of_life(question=None):
515 513 if question:
516 514 return 42
517 515 return "Don't panic"
518 516
519 517 assert "meaning_of_life(question=None)" in pretty.pretty(meaning_of_life)
520 518
521 519
522 520 class OrderedCounter(Counter, OrderedDict):
523 521 'Counter that remembers the order elements are first encountered'
524 522
525 523 def __repr__(self):
526 524 return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))
527 525
528 526 def __reduce__(self):
529 527 return self.__class__, (OrderedDict(self),)
530 528
531 529 class MySet(set): # Override repr of a basic type
532 530 def __repr__(self):
533 531 return 'mine'
534 532
535 533 def test_custom_repr():
536 534 """A custom repr should override a pretty printer for a parent type"""
537 535 oc = OrderedCounter("abracadabra")
538 536 assert "OrderedCounter(OrderedDict" in pretty.pretty(oc)
539 537
540 538 assert pretty.pretty(MySet()) == "mine"
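
Most of the pretty-printer tests above go through the _repr_pretty_ protocol. A minimal sketch of implementing it on a user class (the Interval class is illustrative; the p.group/p.breakable calls mirror MyList above):

from IPython.lib import pretty

class Interval:
    def __init__(self, lo, hi):
        self.lo, self.hi = lo, hi

    def _repr_pretty_(self, p, cycle):
        # cycle is True when the object is reached while it is already being printed
        if cycle:
            p.text("Interval(...)")
            return
        with p.group(len("Interval("), "Interval(", ")"):
            p.pretty(self.lo)
            p.text(",")
            p.breakable()
            p.pretty(self.hi)

assert pretty.pretty(Interval(1, 2)) == "Interval(1, 2)"
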
@@ -1,272 +1,269
1 1 # -*- coding: utf-8 -*-
2 2 #
3 3 # IPython documentation build configuration file.
4 4
5 5 # NOTE: This file has been edited manually from the auto-generated one from
6 6 # sphinx. Do NOT delete and re-generate. If any changes from sphinx are
7 7 # needed, generate a scratch one and merge by hand any new fields needed.
8 8
9 9 #
10 10 # This file is execfile()d with the current directory set to its containing dir.
11 11 #
12 12 # The contents of this file are pickled, so don't put values in the namespace
13 13 # that aren't pickleable (module imports are okay, they're removed automatically).
14 14 #
15 15 # All configuration values have a default value; values that are commented out
16 16 # serve to show the default value.
17 17
18 18
19 19 import sys, os
20 20 from pathlib import Path
21 21
22 if sys.version_info > (3, 11):
23 import tomllib
24 else:
25 import tomli as tomllib
22 import tomllib
26 23
27 24 with open("./sphinx.toml", "rb") as f:
28 25 config = tomllib.load(f)
29 26
30 27 # https://read-the-docs.readthedocs.io/en/latest/faq.html
31 28 ON_RTD = os.environ.get("READTHEDOCS", None) == "True"
32 29
33 30 if ON_RTD:
34 31 tags.add("rtd")
35 32
36 33 # RTD doesn't use the Makefile, so re-run autogen_{things}.py here.
37 34 for name in ("config", "api", "magics", "shortcuts"):
38 35 fname = Path("autogen_{}.py".format(name))
39 36 fpath = (Path(__file__).parent).joinpath("..", fname)
40 37 with open(fpath, encoding="utf-8") as f:
41 38 exec(
42 39 compile(f.read(), fname, "exec"),
43 40 {
44 41 "__file__": fpath,
45 42 "__name__": "__main__",
46 43 },
47 44 )
48 45 import sphinx_rtd_theme
49 46
50 47 # Allow Python scripts to change behaviour during sphinx run
51 48 os.environ["IN_SPHINX_RUN"] = "True"
52 49
53 50 autodoc_type_aliases = {
54 51 "Matcher": " IPython.core.completer.Matcher",
55 52 "MatcherAPIv1": " IPython.core.completer.MatcherAPIv1",
56 53 }
57 54
58 55 # If your extensions are in another directory, add it here. If the directory
59 56 # is relative to the documentation root, use os.path.abspath to make it
60 57 # absolute, like shown here.
61 58 sys.path.insert(0, os.path.abspath("../sphinxext"))
62 59
63 60 # We load the ipython release info into a dict by explicit execution
64 61 iprelease = {}
65 62 exec(
66 63 compile(
67 64 open("../../IPython/core/release.py", encoding="utf-8").read(),
68 65 "../../IPython/core/release.py",
69 66 "exec",
70 67 ),
71 68 iprelease,
72 69 )
73 70
74 71 # General configuration
75 72 # ---------------------
76 73
77 74 # - template_path: Add any paths that contain templates here, relative to this directory.
78 75 # - master_doc: The master toctree document.
79 76 # - project
80 77 # - copyright
81 78 # - github_project_url
82 79 # - source_suffix = config["sphinx"]["source_suffix"]
83 80 # - exclude_patterns:
84 81 # Exclude these glob-style patterns when looking for source files.
85 82 # They are relative to the source/ directory.
86 83 # - pygments_style: The name of the Pygments (syntax highlighting) style to use.
87 84 # - extensions:
88 85 # Add any Sphinx extension module names here, as strings. They can be extensions
89 86 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
90 87 # - default_role
91 88 # - modindex_common_prefix
92 89
93 90 locals().update(config["sphinx"])
94 91
95 92 try:
96 93 from intersphinx_registry import get_intersphinx_mapping
97 94
98 95 intersphinx_mapping = get_intersphinx_mapping(
99 96 packages={
100 97 "python",
101 98 "rpy2",
102 99 "jupyterclient",
103 100 "jupyter",
104 101 "jedi",
105 102 "traitlets",
106 103 "ipykernel",
107 104 "prompt_toolkit",
108 105 "ipywidgets",
109 106 "ipyparallel",
110 107 "pip",
111 108 }
112 109 )
113 110
114 111 except ModuleNotFoundError:
115 112 # In case intersphinx_registry is not yet packaged on the current platform,
116 113 # as it is quite recent.
117 114 print("/!\\ intersphinx_registry not installed, relying on local mapping.")
118 115 intersphinx_mapping = config["intersphinx_mapping"]
119 116 for k, v in intersphinx_mapping.items():
120 117 intersphinx_mapping[k] = tuple(
121 118 [intersphinx_mapping[k]["url"], intersphinx_mapping[k]["fallback"]]
122 119 )
123 120
124 121
125 122 # numpydoc config
126 123 numpydoc_show_class_members = config["numpydoc"][
127 124 "numpydoc_show_class_members"
128 125 ] # Otherwise Sphinx emits thousands of warnings
129 126 numpydoc_class_members_toctree = config["numpydoc"]["numpydoc_class_members_toctree"]
130 127 warning_is_error = config["numpydoc"]["warning_is_error"]
131 128
132 129 # Options for HTML output
133 130 # -----------------------
134 131 # - html_theme
135 132 # - html_static_path
136 133 # Add any paths that contain custom static files (such as style sheets) here,
137 134 # relative to this directory. They are copied after the builtin static files,
138 135 # so a file named "default.css" will overwrite the builtin "default.css".
139 136 # Favicon needs the directory name
140 137 # - html_favicon
141 138 # - html_last_updated_fmt = config["html"]["html_last_updated_fmt"]
142 139 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
143 140 # using the given strftime format.
144 141 # Output file base name for HTML help builder.
145 142 # - htmlhelp_basename
146 143 locals().update(config["html"])
147 144
148 145 # Additional templates that should be rendered to pages, maps page names to
149 146 # template names.
150 147 html_additional_pages = {}
151 148 for item in config["html"]["html_additional_pages"]:
152 149 html_additional_pages[item[0]] = item[1]
153 150
154 151 # Options for LaTeX output
155 152 # ------------------------
156 153
157 154 # Grouping the document tree into LaTeX files. List of tuples
158 155 # (source start file, target name, title, author, document class [howto/manual]).
159 156 latex_documents = []
160 157 for item in config["latex"]["latex_documents"]:
161 158 latex_documents.append(tuple(item))
162 159 # If false, no module index is generated.
163 160 latex_use_modindex = config["latex"]["latex_use_modindex"]
164 161 # The font size ('10pt', '11pt' or '12pt').
165 162 latex_font_size = config["latex"]["latex_font_size"]
166 163
167 164 # Options for texinfo output
168 165 # --------------------------
169 166 texinfo_documents = [
170 167 (
171 168 master_doc,
172 169 "ipython",
173 170 "IPython Documentation",
174 171 "The IPython Development Team",
175 172 "IPython",
176 173 "IPython Documentation",
177 174 "Programming",
178 175 1,
179 176 ),
180 177 ]
181 178
182 179 #########################################################################
183 180 # Custom configuration
184 181 # The default replacements for |version| and |release|, also used in various
185 182 # other places throughout the built documents.
186 183 #
187 184 # The full version, including alpha/beta/rc tags.
188 185 release = "%s" % iprelease["version"]
189 186 # Just the X.Y.Z part, no '-dev'
190 187 version = iprelease["version"].split("-", 1)[0]
191 188
192 189 # There are two options for replacing |today|: either, you set today to some
193 190 # non-false value, then it is used:
194 191 # today = ''
195 192 # Else, today_fmt is used as the format for a strftime call.
196 193 today_fmt = "%B %d, %Y"
197 194
198 195 rst_prolog = ""
199 196
200 197
201 198 def is_stable(extra):
202 199 for ext in {"dev", "b", "rc"}:
203 200 if ext in extra:
204 201 return False
205 202 return True
206 203
207 204
208 205 if is_stable(iprelease["_version_extra"]):
209 206 tags.add("ipystable")
210 207 print("Adding Tag: ipystable")
211 208 else:
212 209 tags.add("ipydev")
213 210 print("Adding Tag: ipydev")
214 211 rst_prolog += """
215 212 .. warning::
216 213
217 214 This documentation covers a development version of IPython. The development
218 215 version may differ significantly from the latest stable release.
219 216 """
220 217
221 218 rst_prolog += """
222 219 .. important::
223 220
224 221 This documentation covers IPython versions 6.0 and higher. Beginning with
225 222 version 6.0, IPython stopped supporting compatibility with Python versions
226 223 lower than 3.3 including all versions of Python 2.7.
227 224
228 225 If you are looking for an IPython version compatible with Python 2.7,
229 226 please use the IPython 5.x LTS release and refer to its documentation (LTS
230 227 is the long term support release).
231 228
232 229 """
233 230
234 231 import logging
235 232
236 233
237 234 class ConfigtraitFilter(logging.Filter):
238 235 """
239 236 This is a filter to remove in sphinx 3+ the error about config traits being duplicated.
240 237
241 238 As we autogenerate configuration traits from subclasses, there is a lot of
242 239 duplication and we want to silence it. Indeed we build on travis with
243 240 warnings-as-error set to True, so those duplicate items make the build fail.
244 241 """
245 242
246 243 def filter(self, record):
247 244 if (
248 245 record.args
249 246 and record.args[0] == "configtrait"
250 247 and "duplicate" in record.msg
251 248 ):
252 249 return False
253 250 return True
254 251
255 252
256 253 ct_filter = ConfigtraitFilter()
257 254
258 255 import sphinx.util
259 256
260 257 logger = sphinx.util.logging.getLogger("sphinx.domains.std").logger
261 258 logger.addFilter(ct_filter)
262 259
263 260
264 261 def setup(app):
265 262 app.add_css_file("theme_overrides.css")
266 263
267 264
268 265 # Cleanup
269 266 # -------
270 267 # delete release info to avoid pickling errors from sphinx
271 268
272 269 del iprelease
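
The conf.py hunk above removes the tomli fallback because tomllib is part of the standard library from Python 3.11 onward, matching the new floor. A minimal sketch of the resulting pattern (the TOML snippet is illustrative, standing in for sphinx.toml):

import tomllib  # stdlib since Python 3.11; the tomli backport is no longer needed

doc = """
[sphinx]
project = "IPython"
master_doc = "index"
"""
config = tomllib.loads(doc)
assert config["sphinx"]["project"] == "IPython"
# conf.py then promotes these values into the Sphinx namespace:
# locals().update(config["sphinx"])
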
@@ -1,397 +1,395
1 1 [build-system]
2 2 requires = ["setuptools>=61.2"]
3 3 # We need access to the 'setupbase' module at build time.
4 4 # Hence we declare a custom build backend.
5 5 build-backend = "_build_meta" # just re-exports setuptools.build_meta definitions
6 6 backend-path = ["."]
7 7
8 8 [project]
9 9 name = "ipython"
10 10 description = "IPython: Productive Interactive Computing"
11 11 keywords = ["Interactive", "Interpreter", "Shell", "Embedding"]
12 12 classifiers = [
13 13 "Framework :: IPython",
14 14 "Framework :: Jupyter",
15 15 "Intended Audience :: Developers",
16 16 "Intended Audience :: Science/Research",
17 17 "License :: OSI Approved :: BSD License",
18 18 "Programming Language :: Python",
19 19 "Programming Language :: Python :: 3",
20 20 "Programming Language :: Python :: 3 :: Only",
21 21 "Topic :: System :: Shells",
22 22 ]
23 requires-python = ">=3.10"
23 requires-python = ">=3.11"
24 24 dependencies = [
25 25 'colorama; sys_platform == "win32"',
26 26 "decorator",
27 "exceptiongroup; python_version<'3.11'",
28 27 "jedi>=0.16",
29 28 "matplotlib-inline",
30 29 'pexpect>4.3; sys_platform != "win32" and sys_platform != "emscripten"',
31 30 "prompt_toolkit>=3.0.41,<3.1.0",
32 31 "pygments>=2.4.0",
33 32 "stack_data",
34 33 "traitlets>=5.13.0",
35 34 "typing_extensions>=4.6; python_version<'3.12'",
36 35 ]
37 36 dynamic = ["authors", "license", "version"]
38 37
39 38 [project.entry-points."pygments.lexers"]
40 39 ipythonconsole = "IPython.lib.lexers:IPythonConsoleLexer"
41 40 ipython = "IPython.lib.lexers:IPythonLexer"
42 41 ipython3 = "IPython.lib.lexers:IPython3Lexer"
43 42
44 43 [project.scripts]
45 44 ipython = "IPython:start_ipython"
46 45 ipython3 = "IPython:start_ipython"
47 46
48 47 [project.readme]
49 48 file = "long_description.rst"
50 49 content-type = "text/x-rst"
51 50
52 51 [project.urls]
53 52 Homepage = "https://ipython.org"
54 53 Documentation = "https://ipython.readthedocs.io/"
55 54 Funding = "https://numfocus.org/"
56 55 Source = "https://github.com/ipython/ipython"
57 56 Tracker = "https://github.com/ipython/ipython/issues"
58 57
59 58 [project.optional-dependencies]
60 59 black = [
61 60 "black",
62 61 ]
63 62 doc = [
64 63 "docrepr",
65 64 "exceptiongroup",
66 65 "intersphinx_registry",
67 66 "ipykernel",
68 67 "ipython[test]",
69 68 "matplotlib",
70 69 "setuptools>=18.5",
71 70 "sphinx-rtd-theme",
72 71 "sphinx>=1.3",
73 72 "sphinxcontrib-jquery",
74 "tomli ; python_version<'3.11'",
75 73 "typing_extensions",
76 74 ]
77 75 kernel = [
78 76 "ipykernel",
79 77 ]
80 78 nbconvert = [
81 79 "nbconvert",
82 80 ]
83 81 nbformat = [
84 82 "nbformat",
85 83 ]
86 84 notebook = [
87 85 "ipywidgets",
88 86 "notebook",
89 87 ]
90 88 parallel = [
91 89 "ipyparallel",
92 90 ]
93 91 qtconsole = [
94 92 "qtconsole",
95 93 ]
96 94 terminal = []
97 95 test = [
98 96 "pytest",
99 97 "pytest-asyncio<0.22",
100 98 "testpath",
101 99 "pickleshare",
102 100 "packaging",
103 101 ]
104 102 test_extra = [
105 103 "ipython[test]",
106 104 "curio",
107 105 "matplotlib!=3.2.0",
108 106 "nbformat",
109 107 "numpy>=1.23",
110 108 "pandas",
111 109 "trio",
112 110 ]
113 111 matplotlib = [
114 112 "matplotlib"
115 113 ]
116 114 all = [
117 115 "ipython[black,doc,kernel,nbconvert,nbformat,notebook,parallel,qtconsole,matplotlib]",
118 116 "ipython[test,test_extra]",
119 117 ]
120 118
121 119 [tool.mypy]
122 120 python_version = "3.10"
123 121 ignore_missing_imports = true
124 122 follow_imports = 'silent'
125 123 exclude = [
126 124 'test_\.+\.py',
127 125 'IPython.utils.tests.test_wildcard',
128 126 'testing',
129 127 'tests',
130 128 'PyColorize.py',
131 129 '_process_win32_controller.py',
132 130 'IPython/core/application.py',
133 131 'IPython/core/profileapp.py',
134 132 'IPython/lib/deepreload.py',
135 133 'IPython/sphinxext/ipython_directive.py',
136 134 'IPython/terminal/ipapp.py',
137 135 'IPython/utils/_process_win32.py',
138 136 'IPython/utils/path.py',
139 137 ]
140 138 # check_untyped_defs = true
141 139 # disallow_untyped_calls = true
142 140 # disallow_untyped_decorators = true
143 141 # ignore_errors = false
144 142 # ignore_missing_imports = false
145 143 disallow_incomplete_defs = true
146 144 disallow_untyped_defs = true
147 145 warn_redundant_casts = true
148 146
149 147 [[tool.mypy.overrides]]
150 148 module = [
151 149 "IPython.core.crashhandler",
152 150 ]
153 151 check_untyped_defs = true
154 152 disallow_incomplete_defs = true
155 153 disallow_untyped_calls = true
156 154 disallow_untyped_decorators = true
157 155 disallow_untyped_defs = true
158 156 ignore_errors = false
159 157 ignore_missing_imports = false
160 158
161 159 [[tool.mypy.overrides]]
162 160 module = [
163 161 "IPython.utils.text",
164 162 ]
165 163 disallow_untyped_defs = true
166 164 check_untyped_defs = false
167 165 disallow_untyped_decorators = true
168 166
169 167 [[tool.mypy.overrides]]
170 168 module = [
171 169 ]
172 170 disallow_untyped_defs = false
173 171 ignore_errors = true
174 172 ignore_missing_imports = true
175 173 disallow_untyped_calls = false
176 174 disallow_incomplete_defs = false
177 175 check_untyped_defs = false
178 176 disallow_untyped_decorators = false
179 177
180 178
181 179 # global ignore error
182 180 [[tool.mypy.overrides]]
183 181 module = [
184 182 "IPython",
185 183 "IPython.conftest",
186 184 "IPython.core.alias",
187 185 "IPython.core.async_helpers",
188 186 "IPython.core.autocall",
189 187 "IPython.core.builtin_trap",
190 188 "IPython.core.compilerop",
191 189 "IPython.core.completer",
192 190 "IPython.core.completerlib",
193 191 "IPython.core.debugger",
194 192 "IPython.core.display",
195 193 "IPython.core.display_functions",
196 194 "IPython.core.display_trap",
197 195 "IPython.core.displayhook",
198 196 "IPython.core.displaypub",
199 197 "IPython.core.events",
200 198 "IPython.core.excolors",
201 199 "IPython.core.extensions",
202 200 "IPython.core.formatters",
203 201 "IPython.core.getipython",
204 202 "IPython.core.guarded_eval",
205 203 "IPython.core.history",
206 204 "IPython.core.historyapp",
207 205 "IPython.core.hooks",
208 206 "IPython.core.inputsplitter",
209 207 "IPython.core.inputtransformer",
210 208 "IPython.core.inputtransformer2",
211 209 "IPython.core.interactiveshell",
212 210 "IPython.core.logger",
213 211 "IPython.core.macro",
214 212 "IPython.core.magic",
215 213 "IPython.core.magic_arguments",
216 214 "IPython.core.magics.ast_mod",
217 215 "IPython.core.magics.auto",
218 216 "IPython.core.magics.basic",
219 217 "IPython.core.magics.code",
220 218 "IPython.core.magics.config",
221 219 "IPython.core.magics.display",
222 220 "IPython.core.magics.execution",
223 221 "IPython.core.magics.extension",
224 222 "IPython.core.magics.history",
225 223 "IPython.core.magics.logging",
226 224 "IPython.core.magics.namespace",
227 225 "IPython.core.magics.osm",
228 226 "IPython.core.magics.packaging",
229 227 "IPython.core.magics.pylab",
230 228 "IPython.core.magics.script",
231 229 "IPython.core.oinspect",
232 230 "IPython.core.page",
233 231 "IPython.core.payload",
234 232 "IPython.core.payloadpage",
235 233 "IPython.core.prefilter",
236 234 "IPython.core.profiledir",
237 235 "IPython.core.prompts",
238 236 "IPython.core.pylabtools",
239 237 "IPython.core.shellapp",
240 238 "IPython.core.splitinput",
241 239 "IPython.core.ultratb",
242 240 "IPython.extensions.autoreload",
243 241 "IPython.extensions.storemagic",
244 242 "IPython.external.qt_for_kernel",
245 243 "IPython.external.qt_loaders",
246 244 "IPython.lib.backgroundjobs",
247 245 "IPython.lib.clipboard",
248 246 "IPython.lib.demo",
249 247 "IPython.lib.display",
250 248 "IPython.lib.editorhooks",
251 249 "IPython.lib.guisupport",
252 250 "IPython.lib.latextools",
253 251 "IPython.lib.lexers",
254 252 "IPython.lib.pretty",
255 253 "IPython.paths",
256 254 "IPython.sphinxext.ipython_console_highlighting",
257 255 "IPython.terminal.debugger",
258 256 "IPython.terminal.embed",
259 257 "IPython.terminal.interactiveshell",
260 258 "IPython.terminal.magics",
261 259 "IPython.terminal.prompts",
262 260 "IPython.terminal.pt_inputhooks",
263 261 "IPython.terminal.pt_inputhooks.asyncio",
264 262 "IPython.terminal.pt_inputhooks.glut",
265 263 "IPython.terminal.pt_inputhooks.gtk",
266 264 "IPython.terminal.pt_inputhooks.gtk3",
267 265 "IPython.terminal.pt_inputhooks.gtk4",
268 266 "IPython.terminal.pt_inputhooks.osx",
269 267 "IPython.terminal.pt_inputhooks.pyglet",
270 268 "IPython.terminal.pt_inputhooks.qt",
271 269 "IPython.terminal.pt_inputhooks.tk",
272 270 "IPython.terminal.pt_inputhooks.wx",
273 271 "IPython.terminal.ptutils",
274 272 "IPython.terminal.shortcuts",
275 273 "IPython.terminal.shortcuts.auto_match",
276 274 "IPython.terminal.shortcuts.auto_suggest",
277 275 "IPython.terminal.shortcuts.filters",
278 276 "IPython.utils._process_cli",
279 277 "IPython.utils._process_common",
280 278 "IPython.utils._process_emscripten",
281 279 "IPython.utils._process_posix",
282 280 "IPython.utils.capture",
283 281 "IPython.utils.coloransi",
284 282 "IPython.utils.contexts",
285 283 "IPython.utils.data",
286 284 "IPython.utils.decorators",
287 285 "IPython.utils.dir2",
288 286 "IPython.utils.encoding",
289 287 "IPython.utils.frame",
290 288 "IPython.utils.generics",
291 289 "IPython.utils.importstring",
292 290 "IPython.utils.io",
293 291 "IPython.utils.ipstruct",
294 292 "IPython.utils.module_paths",
295 293 "IPython.utils.openpy",
296 294 "IPython.utils.process",
297 295 "IPython.utils.py3compat",
298 296 "IPython.utils.sentinel",
299 297 "IPython.utils.shimmodule",
300 298 "IPython.utils.strdispatch",
301 299 "IPython.utils.sysinfo",
302 300 "IPython.utils.syspathcontext",
303 301 "IPython.utils.tempdir",
304 302 "IPython.utils.terminal",
305 303 "IPython.utils.timing",
306 304 "IPython.utils.tokenutil",
307 305 "IPython.utils.tz",
308 306 "IPython.utils.ulinecache",
309 307 "IPython.utils.version",
310 308 "IPython.utils.wildcard",
311 309
312 310 ]
313 311 disallow_untyped_defs = false
314 312 ignore_errors = true
315 313 ignore_missing_imports = true
316 314 disallow_untyped_calls = false
317 315 disallow_incomplete_defs = false
318 316 check_untyped_defs = false
319 317 disallow_untyped_decorators = false
320 318
321 319 [tool.pytest.ini_options]
322 320 addopts = [
323 321 "--durations=10",
324 322 "-pIPython.testing.plugin.pytest_ipdoctest",
325 323 "--ipdoctest-modules",
326 324 "--ignore=docs",
327 325 "--ignore=examples",
328 326 "--ignore=htmlcov",
329 327 "--ignore=ipython_kernel",
330 328 "--ignore=ipython_parallel",
331 329 "--ignore=results",
332 330 "--ignore=tmp",
333 331 "--ignore=tools",
334 332 "--ignore=traitlets",
335 333 "--ignore=IPython/core/tests/daft_extension",
336 334 "--ignore=IPython/sphinxext",
337 335 "--ignore=IPython/terminal/pt_inputhooks",
338 336 "--ignore=IPython/__main__.py",
339 337 "--ignore=IPython/external/qt_for_kernel.py",
340 338 "--ignore=IPython/html/widgets/widget_link.py",
341 339 "--ignore=IPython/html/widgets/widget_output.py",
342 340 "--ignore=IPython/terminal/console.py",
343 341 "--ignore=IPython/utils/_process_cli.py",
344 342 "--ignore=IPython/utils/_process_posix.py",
345 343 "--ignore=IPython/utils/_process_win32.py",
346 344 "--ignore=IPython/utils/_process_win32_controller.py",
347 345 "--ignore=IPython/utils/daemonize.py",
348 346 "--ignore=IPython/utils/eventful.py",
349 347 "--ignore=IPython/kernel",
350 348 "--ignore=IPython/consoleapp.py",
351 349 "--ignore=IPython/core/inputsplitter.py",
352 350 "--ignore=IPython/lib/kernel.py",
353 351 "--ignore=IPython/utils/jsonutil.py",
354 352 "--ignore=IPython/utils/localinterfaces.py",
355 353 "--ignore=IPython/utils/log.py",
356 354 "--ignore=IPython/utils/signatures.py",
357 355 "--ignore=IPython/utils/traitlets.py",
358 356 "--ignore=IPython/utils/version.py"
359 357 ]
360 358 doctest_optionflags = [
361 359 "NORMALIZE_WHITESPACE",
362 360 "ELLIPSIS"
363 361 ]
364 362 ipdoctest_optionflags = [
365 363 "NORMALIZE_WHITESPACE",
366 364 "ELLIPSIS"
367 365 ]
368 366 asyncio_mode = "strict"
369 367
370 368 [tool.pyright]
371 369 pythonPlatform="All"
372 370
373 371 [tool.setuptools]
374 372 zip-safe = false
375 373 platforms = ["Linux", "Mac OSX", "Windows"]
376 374 license-files = ["LICENSE"]
377 375 include-package-data = false
378 376
379 377 [tool.setuptools.packages.find]
380 378 exclude = ["setupext"]
381 379 namespaces = false
382 380
383 381 [tool.setuptools.package-data]
384 382 "IPython" = ["py.typed"]
385 383 "IPython.core" = ["profile/README*"]
386 384 "IPython.core.tests" = ["*.png", "*.jpg", "daft_extension/*.py"]
387 385 "IPython.lib.tests" = ["*.wav"]
388 386 "IPython.testing.plugin" = ["*.txt"]
389 387
390 388 [tool.setuptools.dynamic]
391 389 version = {attr = "IPython.core.release.__version__"}
392 390
393 391 [tool.coverage.run]
394 392 omit = [
395 393 # omit everything in /tmp as we run tempfile
396 394 "/tmp/*",
397 395 ]
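
The requires-python bump above is the metadata pip consults at resolution time; older interpreters are refused before installation starts. A minimal sketch of evaluating that specifier at runtime, assuming the third-party packaging library (already pulled in by the test extras) rather than any IPython API:

import sys
from packaging.specifiers import SpecifierSet
from packaging.version import Version

requires_python = SpecifierSet(">=3.11")  # value from [project] above
current = Version("{}.{}.{}".format(*sys.version_info[:3]))
print(current in requires_python)         # False on 3.10, True on 3.11+
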
@@ -1,141 +1,133
1 1 # -*- coding: utf-8 -*-
2 2 """Setup script for IPython.
3 3
4 4 Under Posix environments it works like a typical setup.py script.
5 5 Under Windows, the command sdist is not supported, since IPython
6 6 requires utilities which are not available under Windows."""
7 7
8 8 #-----------------------------------------------------------------------------
9 9 # Copyright (c) 2008-2011, IPython Development Team.
10 10 # Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
11 11 # Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
12 12 # Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
13 13 #
14 14 # Distributed under the terms of the Modified BSD License.
15 15 #
16 16 # The full license is in the file COPYING.rst, distributed with this software.
17 17 #-----------------------------------------------------------------------------
18 18
19 19 import os
20 20 import sys
21 21
22 22 # **Python version check**
23 23 #
24 24 # This check is also made in IPython/__init__, don't forget to update both when
25 25 # changing Python version requirements.
26 if sys.version_info < (3, 10):
26 if sys.version_info < (3, 11):
27 27 pip_message = 'This may be due to an out of date pip. Make sure you have pip >= 9.0.1.'
28 28 try:
29 29 import pip
30 30 pip_version = tuple([int(x) for x in pip.__version__.split('.')[:3]])
31 31 if pip_version < (9, 0, 1) :
32 32 pip_message = 'Your pip version is out of date, please install pip >= 9.0.1. '\
33 33 'pip {} detected.'.format(pip.__version__)
34 34 else:
35 35 # pip is new enough - it must be something else
36 36 pip_message = ''
37 37 except Exception:
38 38 pass
39 39
40 40
41 41 error = """
42 (information not available for more recent version of IPython)
42 43 IPython 8.19+ supports Python 3.10 and above, following SPEC0
43 44 IPython 8.13+ supports Python 3.9 and above, following NEP 29.
44 45 IPython 8.0-8.12 supports Python 3.8 and above, following NEP 29.
45 When using Python 2.7, please install IPython 5.x LTS Long Term Support version.
46 Python 3.3 and 3.4 were supported up to IPython 6.x.
47 Python 3.5 was supported with IPython 7.0 to 7.9.
48 Python 3.6 was supported with IPython up to 7.16.
49 Python 3.7 was still supported with the 7.x branch.
50
51 See IPython `README.rst` file for more information:
52
53 https://github.com/ipython/ipython/blob/main/README.rst
54 46
55 47 Python {py} detected.
56 48 {pip}
57 49 """.format(
58 50 py=sys.version_info, pip=pip_message
59 51 )
60 52
61 53 print(error, file=sys.stderr)
62 54 sys.exit(1)
63 55
64 56 # At least we're on the python version we need, move on.
65 57
66 58 from setuptools import setup
67 59
68 60 # Our own imports
69 61
70 62 from setupbase import target_update
71 63
72 64 from setupbase import (
73 65 setup_args,
74 66 check_package_data_first,
75 67 find_data_files,
76 68 git_prebuild,
77 69 )
78 70
79 71 #-------------------------------------------------------------------------------
80 72 # Handle OS specific things
81 73 #-------------------------------------------------------------------------------
82 74
83 75 if os.name in ('nt','dos'):
84 76 os_name = 'windows'
85 77 else:
86 78 os_name = os.name
87 79
88 80 # Under Windows, 'sdist' has not been supported. Now that the docs build with
89 81 # Sphinx it might work, but let's not turn it on until someone confirms that it
90 82 # actually works.
91 83 if os_name == 'windows' and 'sdist' in sys.argv:
92 84 print('The sdist command is not available under Windows. Exiting.')
93 85 sys.exit(1)
94 86
95 87
96 88 #-------------------------------------------------------------------------------
97 89 # Things related to the IPython documentation
98 90 #-------------------------------------------------------------------------------
99 91
100 92 # update the manuals when building a source dist
101 93 if len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):
102 94
103 95 # List of things to be updated. Each entry is a triplet of args for
104 96 # target_update()
105 97 to_update = [
106 98 (
107 99 "docs/man/ipython.1.gz",
108 100 ["docs/man/ipython.1"],
109 101 "cd docs/man && python -m gzip --best ipython.1",
110 102 ),
111 103 ]
112 104
113 105
114 106 [ target_update(*t) for t in to_update ]
115 107
116 108 #---------------------------------------------------------------------------
117 109 # Find all the packages, package data, and data_files
118 110 #---------------------------------------------------------------------------
119 111
120 112 data_files = find_data_files()
121 113
122 114 setup_args['data_files'] = data_files
123 115
124 116 #---------------------------------------------------------------------------
125 117 # custom distutils commands
126 118 #---------------------------------------------------------------------------
127 119 # imports here, so they are after setuptools import if there was one
128 120 from setuptools.command.sdist import sdist
129 121
130 122 setup_args['cmdclass'] = {
131 123 'build_py': \
132 124 check_package_data_first(git_prebuild('IPython')),
133 125 'sdist' : git_prebuild('IPython', sdist),
134 126 }
135 127
136 128 #---------------------------------------------------------------------------
137 129 # Do the actual setup now
138 130 #---------------------------------------------------------------------------
139 131
140 132 if __name__ == "__main__":
141 133 setup(**setup_args)