Merge branch 'master' into alt_text
blois
r26808:a09e5776 merge

The requested changes are too big and the content was truncated.

@@ -0,0 +1,26 @@
1 name: Build docs
2
3 on: [push, pull_request]
4
5 jobs:
6 build:
7 runs-on: ubuntu-latest
8
9 steps:
10 - uses: actions/checkout@v2
11 - name: Set up Python 3.8
12 uses: actions/setup-python@v2
13 with:
14 python-version: 3.8
15 - name: Install Graphviz
16 run: |
17 sudo apt-get update
18 sudo apt-get install graphviz
19 - name: Install Python dependencies
20 run: |
21 python -m pip install --upgrade pip setuptools
22 pip install -r docs/requirements.txt
23 - name: Build docs
24 run: |
25 python tools/fixup_whats_new_pr.py
26 make -C docs/ html SPHINXOPTS="-W"
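The workflow above pins the documentation build: install Graphviz, install docs/requirements.txt, then run Sphinx with warnings treated as errors. For contributors who want to reproduce the same build locally, here is a minimal, hedged sketch that simply shells out to the commands the workflow runs; it assumes a repository checkout with Graphviz already installed and is an illustration, not an official helper script.

# Hypothetical local reproduction of the docs-build job above; not part of the repository.
import subprocess

def run(cmd: str) -> None:
    # Echo each command before running it, and stop on the first failure.
    print("+", cmd)
    subprocess.run(cmd, shell=True, check=True)

run("python -m pip install --upgrade pip setuptools")
run("pip install -r docs/requirements.txt")
run("python tools/fixup_whats_new_pr.py")
run('make -C docs/ html SPHINXOPTS="-W"')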
@@ -0,0 +1,23 @@
1 name: Run tests on OSX
2
3 on: [push, pull_request]
4
5 jobs:
6 test:
7 runs-on: macos-latest
8
9 steps:
10 - uses: actions/checkout@v2
11 - name: Set up Python 3.7
12 uses: actions/setup-python@v2
13 with:
14 python-version: 3.7
15 - name: Install and update Python dependencies
16 run: |
17 python -m pip install --upgrade pip setuptools wheel
18 python -m pip install --upgrade -e file://$PWD#egg=ipython[test]
19 python -m pip install --upgrade --upgrade-strategy eager trio curio
20 python -m pip install --upgrade pytest pytest-trio 'matplotlib!=3.2.0'
21 python -m pip install --upgrade anyio
22 - name: pytest
23 run: pytest
@@ -0,0 +1,36 @@
1 name: Run tests
2
3 on: [push, pull_request]
4
5 jobs:
6 test:
7 runs-on: ubuntu-latest
8 strategy:
9 matrix:
10 python-version: [3.7, 3.8, 3.9]
11
12 steps:
13 - uses: actions/checkout@v2
14 - name: Set up Python ${{ matrix.python-version }}
15 uses: actions/setup-python@v2
16 with:
17 python-version: ${{ matrix.python-version }}
18 - name: Install and update Python dependencies
19 run: |
20 python -m pip install --upgrade pip setuptools wheel
21 python -m pip install --upgrade -e file://$PWD#egg=ipython[test]
22 python -m pip install --upgrade --upgrade-strategy eager trio curio
23 python -m pip install --upgrade pytest pytest-trio 'matplotlib!=3.2.0'
24 python -m pip install --upgrade check-manifest pytest-cov anyio
25 - name: Check manifest
26 run: check-manifest
27 - name: iptest
28 run: |
29 cd /tmp && iptest --coverage xml && cd -
30 cp /tmp/ipy_coverage.xml ./
31 cp /tmp/.coverage ./
32 - name: pytest
33 run: |
34 pytest
35 - name: Upload coverage to Codecov
36 uses: codecov/codecov-action@v1
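The two test workflows above (macOS with Python 3.7, and Linux across Python 3.7-3.9) follow the same recipe: editable-install IPython with its [test] extras, add the async backends and a pinned-away matplotlib, then run the suites. A hedged sketch of the core steps for a local run follows; the CI-only pieces (check-manifest, the iptest coverage copy, and the Codecov upload) are deliberately omitted, and the helper is illustrative rather than a supported script.

# Rough local equivalent of the install + pytest steps above; run from a repository checkout.
import os
import subprocess

def run(cmd: str) -> None:
    print("+", cmd)
    subprocess.run(cmd, shell=True, check=True)

run("python -m pip install --upgrade pip setuptools wheel")
# Editable install with the test extras, mirroring `file://$PWD#egg=ipython[test]`.
run(f"python -m pip install --upgrade -e file://{os.getcwd()}#egg=ipython[test]")
run("python -m pip install --upgrade --upgrade-strategy eager trio curio")
run("python -m pip install --upgrade pytest pytest-trio 'matplotlib!=3.2.0' anyio")
run("pytest")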
Eleven additional files were added in this merge as new files (mode 100644); the viewer shows them as "NO CONTENT" because the commit is too big and their contents were truncated.
@@ -1,15 +1,21 @@
1 1 # When making commits that are strictly formatting/style changes, add the
2 2 # commit hash here, so git blame can ignore the change. See docs for more
3 3 # details:
4 4 # https://git-scm.com/docs/git-config#Documentation/git-config.txt-blameignoreRevsFile
5 5 #
6 6 #
7 7 # You should be able to execute either
8 8 # ./tools/configure-git-blame-ignore-revs.bat or
9 9 # ./tools/configure-git-blame-ignore-revs.sh
10 10 #
11 11 # Example entries:
12 12 #
13 13 # <full commit hash> # initial black-format
14 14 # <full commit hash> # rename something internal
15 15 6e748726282d1acb9a4f9f264ee679c474c4b8f5 # Apply pygrade --36plus on IPython/core/tests/test_inputtransformer.py.
16 0233e65d8086d0ec34acb8685b7a5411633f0899 # apply pyupgrade to IPython/extensions/tests/test_autoreload.py
17 a6a7e4dd7e51b892147895006d3a2a6c34b79ae6 # apply black to IPython/extensions/tests/test_autoreload.py
18 c5ca5a8f25432dfd6b9eccbbe446a8348bf37cfa # apply pyupgrade to IPython/extensions/autoreload.py
19 50624b84ccdece781750f5eb635a9efbf2fe30d6 # apply black to IPython/extensions/autoreload.py
20 b7aaa47412b96379198705955004930c57f9d74a # apply pyupgrade to IPython/extensions/autoreload.py
21 9c7476a88af3e567426b412f1b3c778401d8f6aa # apply black to IPython/extensions/autoreload.py
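The comments at the top of this file say it can be activated with the bundled tools/configure-git-blame-ignore-revs.sh or .bat scripts. If you prefer to wire it up by hand, the effect those scripts aim for is just a git configuration entry pointing blame at the ignore list; a minimal, hypothetical equivalent (assuming the file keeps its conventional name at the repository root) is:

# Hypothetical one-liner equivalent of the configure scripts mentioned above:
# tell `git blame` in this clone to skip the listed formatting-only commits.
import subprocess

subprocess.run(
    ["git", "config", "blame.ignoreRevsFile", ".git-blame-ignore-revs"],
    check=True,
)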
@@ -1,19 +1,16 @@
1 1 ---
2 2 name: Bug report / Question / Feature
3 3 about: Anything related to IPython itself
4 4 title: ''
5 5 labels: ''
6 6 assignees: ''
7 7
8 8 ---
9 9
10 10 <!-- This is the repository for the IPython command line; please try to make sure this question/bug/feature belongs here and not in one of the Jupyter repositories.
11 11
12 12 If it's a generic Python/Jupyter question, try other forums or discourse.jupyter.org.
13 13
14 14 If you are unsure, it's ok to post here; though, as there are few maintainers, you might not get a fast response.
15 15
16 Ability of maintainers to spend time and resources on project like IPython is heavily influenced by US politics, and the current government policies have been harmful to the IPython Maintainers and Community.
17
18 If you are on the fence on who to vote for or wether to vote, please cast your vote in for the democrat party in the US.
19 16 -->
@@ -1,33 +1,34 @@
1 1 name: Run MyPy
2 2
3 3 on:
4 4 push:
5 branches: [ master ]
5 branches: [ master, 7.x]
6 6 pull_request:
7 branches: [ master ]
7 branches: [ master, 7.x]
8 8
9 9 jobs:
10 10 build:
11 11
12 12 runs-on: ubuntu-latest
13 13 strategy:
14 14 matrix:
15 15 python-version: [3.8]
16 16
17 17 steps:
18 18 - uses: actions/checkout@v2
19 19 - name: Set up Python ${{ matrix.python-version }}
20 20 uses: actions/setup-python@v2
21 21 with:
22 22 python-version: ${{ matrix.python-version }}
23 23 - name: Install dependencies
24 24 run: |
25 25 python -m pip install --upgrade pip
26 26 pip install mypy pyflakes flake8
27 27 - name: Lint with mypy
28 28 run: |
29 mypy IPython/terminal/ptutils.py
30 mypy IPython/core/c*.py
29 mypy -p IPython.terminal
30 mypy -p IPython.core.magics
31 31 - name: Lint with pyflakes
32 32 run: |
33 33 flake8 IPython/core/magics/script.py
34 flake8 IPython/core/magics/packaging.py
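The lint job above was changed to type-check whole packages (mypy -p IPython.terminal and mypy -p IPython.core.magics) instead of two individual files, and flake8 now also covers IPython/core/magics/packaging.py. A hedged sketch of running the same checks locally, assuming mypy and flake8 are installed in the current environment:

# Illustrative local equivalent of the "Lint with mypy" and "Lint with pyflakes" steps above.
import subprocess

checks = [
    ["mypy", "-p", "IPython.terminal"],
    ["mypy", "-p", "IPython.core.magics"],
    ["flake8", "IPython/core/magics/script.py"],
    ["flake8", "IPython/core/magics/packaging.py"],
]
for cmd in checks:
    print("+", " ".join(cmd))
    subprocess.run(cmd, check=True)  # stop at the first failing check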
@@ -1,39 +1,39 @@
1 1 # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 2 # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 3
4 4 name: Python package
5 5
6 6 on:
7 7 push:
8 branches: [ master ]
8 branches: [ master, 7.x ]
9 9 pull_request:
10 branches: [ master ]
10 branches: [ master, 7.x ]
11 11
12 12 jobs:
13 13 build:
14 14
15 15 runs-on: ubuntu-latest
16 16 strategy:
17 17 matrix:
18 18 python-version: [3.8]
19 19
20 20 steps:
21 21 - uses: actions/checkout@v2
22 22 with:
23 23 fetch-depth: 0
24 24 - name: Set up Python ${{ matrix.python-version }}
25 25 uses: actions/setup-python@v2
26 26 with:
27 27 python-version: ${{ matrix.python-version }}
28 28 - name: Install dependencies
29 29 run: |
30 30 python -m pip install --upgrade pip
31 pip install darker
31 pip install darker isort
32 32 - name: Lint with darker
33 33 run: |
34 34 darker -r 60625f241f298b5039cb2debc365db38aa7bb522 --check --diff . || (
35 35 echo "Changes need auto-formatting. Run:"
36 36 echo " darker -r 60625f241f298b5039cb2debc365db38aa7bb522"
37 37 echo "then commit and push changes to fix."
38 38 exit 1
39 39 )
@@ -1,30 +1,32 @@
1 1 MANIFEST
2 2 build
3 3 dist
4 4 _build
5 5 docs/man/*.gz
6 6 docs/source/api/generated
7 7 docs/source/config/options
8 8 docs/source/config/shortcuts/*.csv
9 docs/source/savefig
9 10 docs/source/interactive/magics-generated.txt
10 11 docs/gh-pages
11 12 jupyter_notebook/notebook/static/mathjax
12 13 jupyter_notebook/static/style/*.map
13 14 *.py[co]
14 15 __pycache__
15 16 *.egg-info
16 17 *~
17 18 *.bak
18 19 .ipynb_checkpoints
19 20 .tox
20 21 .DS_Store
21 22 \#*#
22 23 .#*
23 24 .cache
24 25 .coverage
25 26 *.swp
26 27 .vscode
27 28 .pytest_cache
28 29 .python-version
29 30 venv*/
30 31 .idea/
32 .mypy_cache/
@@ -1,152 +1,149 @@
1 1 # encoding: utf-8
2 2 """
3 3 IPython: tools for interactive and parallel computing in Python.
4 4
5 5 https://ipython.org
6 6 """
7 7 #-----------------------------------------------------------------------------
8 8 # Copyright (c) 2008-2011, IPython Development Team.
9 9 # Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
10 10 # Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
11 11 # Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
12 12 #
13 13 # Distributed under the terms of the Modified BSD License.
14 14 #
15 15 # The full license is in the file COPYING.txt, distributed with this software.
16 16 #-----------------------------------------------------------------------------
17 17
18 18 #-----------------------------------------------------------------------------
19 19 # Imports
20 20 #-----------------------------------------------------------------------------
21 21
22 22 import os
23 23 import sys
24 24
25 25 #-----------------------------------------------------------------------------
26 26 # Setup everything
27 27 #-----------------------------------------------------------------------------
28 28
29 29 # Don't forget to also update setup.py when this changes!
30 30 if sys.version_info < (3, 6):
31 31 raise ImportError(
32 32 """
33 33 IPython 7.10+ supports Python 3.6 and above.
34 34 When using Python 2.7, please install IPython 5.x LTS Long Term Support version.
35 35 Python 3.3 and 3.4 were supported up to IPython 6.x.
36 36 Python 3.5 was supported with IPython 7.0 to 7.9.
37 37
38 38 See IPython `README.rst` file for more information:
39 39
40 40 https://github.com/ipython/ipython/blob/master/README.rst
41 41
42 42 """)
43 43
44 44 # Make it easy to import extensions - they are always directly on pythonpath.
45 45 # Therefore, non-IPython modules can be added to extensions directory.
46 46 # This should probably be in ipapp.py.
47 47 sys.path.append(os.path.join(os.path.dirname(__file__), "extensions"))
48 48
49 49 #-----------------------------------------------------------------------------
50 50 # Setup the top level names
51 51 #-----------------------------------------------------------------------------
52 52
53 53 from .core.getipython import get_ipython
54 54 from .core import release
55 55 from .core.application import Application
56 56 from .terminal.embed import embed
57 57
58 58 from .core.interactiveshell import InteractiveShell
59 59 from .testing import test
60 60 from .utils.sysinfo import sys_info
61 61 from .utils.frame import extract_module_locals
62 62
63 63 # Release data
64 64 __author__ = '%s <%s>' % (release.author, release.author_email)
65 65 __license__ = release.license
66 66 __version__ = release.version
67 67 version_info = release.version_info
68 68
69 69 def embed_kernel(module=None, local_ns=None, **kwargs):
70 70 """Embed and start an IPython kernel in a given scope.
71
71
72 72 If you don't want the kernel to initialize the namespace
73 73 from the scope of the surrounding function,
74 74 and/or you want to load full IPython configuration,
75 75 you probably want `IPython.start_kernel()` instead.
76
76
77 77 Parameters
78 78 ----------
79 79 module : types.ModuleType, optional
80 80 The module to load into IPython globals (default: caller)
81 81 local_ns : dict, optional
82 82 The namespace to load into IPython user namespace (default: caller)
83
84 kwargs : various, optional
83 **kwargs : various, optional
85 84 Further keyword args are relayed to the IPKernelApp constructor,
86 85 allowing configuration of the Kernel. Will only have an effect
87 86 on the first embed_kernel call for a given process.
88 87 """
89 88
90 89 (caller_module, caller_locals) = extract_module_locals(1)
91 90 if module is None:
92 91 module = caller_module
93 92 if local_ns is None:
94 93 local_ns = caller_locals
95 94
96 95 # Only import .zmq when we really need it
97 96 from ipykernel.embed import embed_kernel as real_embed_kernel
98 97 real_embed_kernel(module=module, local_ns=local_ns, **kwargs)
99 98
100 99 def start_ipython(argv=None, **kwargs):
101 100 """Launch a normal IPython instance (as opposed to embedded)
102
101
103 102 `IPython.embed()` puts a shell in a particular calling scope,
104 103 such as a function or method for debugging purposes,
105 104 which is often not desirable.
106
105
107 106 `start_ipython()` does full, regular IPython initialization,
108 107 including loading startup files, configuration, etc.
109 108 much of which is skipped by `embed()`.
110
109
111 110 This is a public API method, and will survive implementation changes.
112
111
113 112 Parameters
114 113 ----------
115
116 114 argv : list or None, optional
117 115 If unspecified or None, IPython will parse command-line options from sys.argv.
118 116 To prevent any command-line parsing, pass an empty list: `argv=[]`.
119 117 user_ns : dict, optional
120 118 specify this dictionary to initialize the IPython user namespace with particular values.
121 kwargs : various, optional
119 **kwargs : various, optional
122 120 Any other kwargs will be passed to the Application constructor,
123 121 such as `config`.
124 122 """
125 123 from IPython.terminal.ipapp import launch_new_instance
126 124 return launch_new_instance(argv=argv, **kwargs)
127 125
128 126 def start_kernel(argv=None, **kwargs):
129 127 """Launch a normal IPython kernel instance (as opposed to embedded)
130
128
131 129 `IPython.embed_kernel()` puts a shell in a particular calling scope,
132 130 such as a function or method for debugging purposes,
133 131 which is often not desirable.
134
132
135 133 `start_kernel()` does full, regular IPython initialization,
136 134 including loading startup files, configuration, etc.
137 135 much of which is skipped by `embed()`.
138
136
139 137 Parameters
140 138 ----------
141
142 139 argv : list or None, optional
143 140 If unspecified or None, IPython will parse command-line options from sys.argv.
144 141 To prevent any command-line parsing, pass an empty list: `argv=[]`.
145 142 user_ns : dict, optional
146 143 specify this dictionary to initialize the IPython user namespace with particular values.
147 kwargs : various, optional
144 **kwargs : various, optional
148 145 Any other kwargs will be passed to the Application constructor,
149 146 such as `config`.
150 147 """
151 148 from IPython.kernel.zmq.kernelapp import launch_new_instance
152 149 return launch_new_instance(argv=argv, **kwargs)
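The docstrings above draw a line between the embedding helpers (embed_kernel() starts a kernel in the caller's scope) and the full launchers (start_ipython() and start_kernel(), which do complete initialization including startup files and configuration). As a small usage sketch based only on those docstrings: passing argv=[] suppresses command-line parsing, and user_ns seeds the interactive namespace.

# Illustrative only: drop into a full IPython session from a script,
# following the start_ipython() docstring shown above.
import IPython

def main() -> None:
    answer = 42
    # argv=[] -> do not parse sys.argv; user_ns -> pre-populate the user namespace.
    IPython.start_ipython(argv=[], user_ns={"answer": answer})

if __name__ == "__main__":
    main()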
@@ -1,464 +1,486 @@
1 1 # encoding: utf-8
2 2 """
3 3 An application for IPython.
4 4
5 5 All top-level applications should use the classes in this module for
6 6 handling configuration and creating configurables.
7 7
8 8 The job of an :class:`Application` is to create the master configuration
9 9 object and then create the configurable objects, passing the config to them.
10 10 """
11 11
12 12 # Copyright (c) IPython Development Team.
13 13 # Distributed under the terms of the Modified BSD License.
14 14
15 15 import atexit
16 16 from copy import deepcopy
17 17 import glob
18 18 import logging
19 19 import os
20 20 import shutil
21 21 import sys
22 22
23 23 from pathlib import Path
24 24
25 25 from traitlets.config.application import Application, catch_config_error
26 26 from traitlets.config.loader import ConfigFileNotFound, PyFileConfigLoader
27 27 from IPython.core import release, crashhandler
28 28 from IPython.core.profiledir import ProfileDir, ProfileDirError
29 29 from IPython.paths import get_ipython_dir, get_ipython_package_dir
30 30 from IPython.utils.path import ensure_dir_exists
31 31 from traitlets import (
32 32 List, Unicode, Type, Bool, Set, Instance, Undefined,
33 33 default, observe,
34 34 )
35 35
36 36 if os.name == "nt":
37 37 programdata = Path(os.environ.get("PROGRAMDATA", None))
38 38 if programdata:
39 39 SYSTEM_CONFIG_DIRS = [str(programdata / "ipython")]
40 40 else: # PROGRAMDATA is not defined by default on XP.
41 41 SYSTEM_CONFIG_DIRS = []
42 42 else:
43 43 SYSTEM_CONFIG_DIRS = [
44 44 "/usr/local/etc/ipython",
45 45 "/etc/ipython",
46 46 ]
47 47
48 48
49 49 ENV_CONFIG_DIRS = []
50 50 _env_config_dir = os.path.join(sys.prefix, 'etc', 'ipython')
51 51 if _env_config_dir not in SYSTEM_CONFIG_DIRS:
52 52 # only add ENV_CONFIG if sys.prefix is not already included
53 53 ENV_CONFIG_DIRS.append(_env_config_dir)
54 54
55 55
56 56 _envvar = os.environ.get('IPYTHON_SUPPRESS_CONFIG_ERRORS')
57 57 if _envvar in {None, ''}:
58 58 IPYTHON_SUPPRESS_CONFIG_ERRORS = None
59 59 else:
60 60 if _envvar.lower() in {'1','true'}:
61 61 IPYTHON_SUPPRESS_CONFIG_ERRORS = True
62 62 elif _envvar.lower() in {'0','false'} :
63 63 IPYTHON_SUPPRESS_CONFIG_ERRORS = False
64 64 else:
65 65 sys.exit("Unsupported value for environment variable: 'IPYTHON_SUPPRESS_CONFIG_ERRORS' is set to '%s' which is none of {'0', '1', 'false', 'true', ''}."% _envvar )
66 66
67 67 # aliases and flags
68 68
69 base_aliases = {
70 'profile-dir' : 'ProfileDir.location',
71 'profile' : 'BaseIPythonApplication.profile',
72 'ipython-dir' : 'BaseIPythonApplication.ipython_dir',
73 'log-level' : 'Application.log_level',
74 'config' : 'BaseIPythonApplication.extra_config_file',
75 }
76
77 base_flags = dict(
78 debug = ({'Application' : {'log_level' : logging.DEBUG}},
79 "set log level to logging.DEBUG (maximize logging output)"),
80 quiet = ({'Application' : {'log_level' : logging.CRITICAL}},
81 "set log level to logging.CRITICAL (minimize logging output)"),
82 init = ({'BaseIPythonApplication' : {
83 'copy_config_files' : True,
84 'auto_create' : True}
85 }, """Initialize profile with default config files. This is equivalent
69 base_aliases = {}
70 if isinstance(Application.aliases, dict):
71 # traitlets 5
72 base_aliases.update(Application.aliases)
73 base_aliases.update(
74 {
75 "profile-dir": "ProfileDir.location",
76 "profile": "BaseIPythonApplication.profile",
77 "ipython-dir": "BaseIPythonApplication.ipython_dir",
78 "log-level": "Application.log_level",
79 "config": "BaseIPythonApplication.extra_config_file",
80 }
81 )
82
83 base_flags = dict()
84 if isinstance(Application.flags, dict):
85 # traitlets 5
86 base_flags.update(Application.flags)
87 base_flags.update(
88 dict(
89 debug=(
90 {"Application": {"log_level": logging.DEBUG}},
91 "set log level to logging.DEBUG (maximize logging output)",
92 ),
93 quiet=(
94 {"Application": {"log_level": logging.CRITICAL}},
95 "set log level to logging.CRITICAL (minimize logging output)",
96 ),
97 init=(
98 {
99 "BaseIPythonApplication": {
100 "copy_config_files": True,
101 "auto_create": True,
102 }
103 },
104 """Initialize profile with default config files. This is equivalent
86 105 to running `ipython profile create <profile>` prior to startup.
87 """)
106 """,
107 ),
108 )
88 109 )
89 110
111
90 112 class ProfileAwareConfigLoader(PyFileConfigLoader):
91 113 """A Python file config loader that is aware of IPython profiles."""
92 114 def load_subconfig(self, fname, path=None, profile=None):
93 115 if profile is not None:
94 116 try:
95 117 profile_dir = ProfileDir.find_profile_dir_by_name(
96 118 get_ipython_dir(),
97 119 profile,
98 120 )
99 121 except ProfileDirError:
100 122 return
101 123 path = profile_dir.location
102 124 return super(ProfileAwareConfigLoader, self).load_subconfig(fname, path=path)
103 125
104 126 class BaseIPythonApplication(Application):
105 127
106 128 name = u'ipython'
107 129 description = Unicode(u'IPython: an enhanced interactive Python shell.')
108 130 version = Unicode(release.version)
109 131
110 132 aliases = base_aliases
111 133 flags = base_flags
112 134 classes = List([ProfileDir])
113 135
114 136 # enable `load_subconfig('cfg.py', profile='name')`
115 137 python_config_loader_class = ProfileAwareConfigLoader
116 138
117 139 # Track whether the config_file has changed,
118 140 # because some logic happens only if we aren't using the default.
119 141 config_file_specified = Set()
120 142
121 143 config_file_name = Unicode()
122 144 @default('config_file_name')
123 145 def _config_file_name_default(self):
124 146 return self.name.replace('-','_') + u'_config.py'
125 147 @observe('config_file_name')
126 148 def _config_file_name_changed(self, change):
127 149 if change['new'] != change['old']:
128 150 self.config_file_specified.add(change['new'])
129 151
130 152 # The directory that contains IPython's builtin profiles.
131 153 builtin_profile_dir = Unicode(
132 154 os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default')
133 155 )
134 156
135 157 config_file_paths = List(Unicode())
136 158 @default('config_file_paths')
137 159 def _config_file_paths_default(self):
138 160 return [os.getcwd()]
139 161
140 162 extra_config_file = Unicode(
141 163 help="""Path to an extra config file to load.
142 164
143 165 If specified, load this config file in addition to any other IPython config.
144 166 """).tag(config=True)
145 167 @observe('extra_config_file')
146 168 def _extra_config_file_changed(self, change):
147 169 old = change['old']
148 170 new = change['new']
149 171 try:
150 172 self.config_files.remove(old)
151 173 except ValueError:
152 174 pass
153 175 self.config_file_specified.add(new)
154 176 self.config_files.append(new)
155 177
156 178 profile = Unicode(u'default',
157 179 help="""The IPython profile to use."""
158 180 ).tag(config=True)
159 181
160 182 @observe('profile')
161 183 def _profile_changed(self, change):
162 184 self.builtin_profile_dir = os.path.join(
163 185 get_ipython_package_dir(), u'config', u'profile', change['new']
164 186 )
165 187
166 188 ipython_dir = Unicode(
167 189 help="""
168 190 The name of the IPython directory. This directory is used for logging
169 191 configuration (through profiles), history storage, etc. The default
170 192 is usually $HOME/.ipython. This option can also be specified through
171 193 the environment variable IPYTHONDIR.
172 194 """
173 195 ).tag(config=True)
174 196 @default('ipython_dir')
175 197 def _ipython_dir_default(self):
176 198 d = get_ipython_dir()
177 199 self._ipython_dir_changed({
178 200 'name': 'ipython_dir',
179 201 'old': d,
180 202 'new': d,
181 203 })
182 204 return d
183 205
184 206 _in_init_profile_dir = False
185 207 profile_dir = Instance(ProfileDir, allow_none=True)
186 208 @default('profile_dir')
187 209 def _profile_dir_default(self):
188 210 # avoid recursion
189 211 if self._in_init_profile_dir:
190 212 return
191 213 # profile_dir requested early, force initialization
192 214 self.init_profile_dir()
193 215 return self.profile_dir
194 216
195 217 overwrite = Bool(False,
196 218 help="""Whether to overwrite existing config files when copying"""
197 219 ).tag(config=True)
198 220 auto_create = Bool(False,
199 221 help="""Whether to create profile dir if it doesn't exist"""
200 222 ).tag(config=True)
201 223
202 224 config_files = List(Unicode())
203 225 @default('config_files')
204 226 def _config_files_default(self):
205 227 return [self.config_file_name]
206 228
207 229 copy_config_files = Bool(False,
208 230 help="""Whether to install the default config files into the profile dir.
209 231 If a new profile is being created, and IPython contains config files for that
210 232 profile, then they will be staged into the new directory. Otherwise,
211 233 default config files will be automatically generated.
212 234 """).tag(config=True)
213 235
214 236 verbose_crash = Bool(False,
215 237 help="""Create a massive crash report when IPython encounters what may be an
216 238 internal error. The default is to append a short message to the
217 239 usual traceback""").tag(config=True)
218 240
219 241 # The class to use as the crash handler.
220 242 crash_handler_class = Type(crashhandler.CrashHandler)
221 243
222 244 @catch_config_error
223 245 def __init__(self, **kwargs):
224 246 super(BaseIPythonApplication, self).__init__(**kwargs)
225 247 # ensure current working directory exists
226 248 try:
227 249 os.getcwd()
228 250 except:
229 251 # exit if cwd doesn't exist
230 252 self.log.error("Current working directory doesn't exist.")
231 253 self.exit(1)
232 254
233 255 #-------------------------------------------------------------------------
234 256 # Various stages of Application creation
235 257 #-------------------------------------------------------------------------
236 258
237 259 deprecated_subcommands = {}
238 260
239 261 def initialize_subcommand(self, subc, argv=None):
240 262 if subc in self.deprecated_subcommands:
241 263 self.log.warning("Subcommand `ipython {sub}` is deprecated and will be removed "
242 264 "in future versions.".format(sub=subc))
243 265 self.log.warning("You likely want to use `jupyter {sub}` in the "
244 266 "future".format(sub=subc))
245 267 return super(BaseIPythonApplication, self).initialize_subcommand(subc, argv)
246 268
247 269 def init_crash_handler(self):
248 270 """Create a crash handler, typically setting sys.excepthook to it."""
249 271 self.crash_handler = self.crash_handler_class(self)
250 272 sys.excepthook = self.excepthook
251 273 def unset_crashhandler():
252 274 sys.excepthook = sys.__excepthook__
253 275 atexit.register(unset_crashhandler)
254 276
255 277 def excepthook(self, etype, evalue, tb):
256 278 """this is sys.excepthook after init_crashhandler
257
279
258 280 set self.verbose_crash=True to use our full crashhandler, instead of
259 281 a regular traceback with a short message (crash_handler_lite)
260 282 """
261 283
262 284 if self.verbose_crash:
263 285 return self.crash_handler(etype, evalue, tb)
264 286 else:
265 287 return crashhandler.crash_handler_lite(etype, evalue, tb)
266 288
267 289 @observe('ipython_dir')
268 290 def _ipython_dir_changed(self, change):
269 291 old = change['old']
270 292 new = change['new']
271 293 if old is not Undefined:
272 294 str_old = os.path.abspath(old)
273 295 if str_old in sys.path:
274 296 sys.path.remove(str_old)
275 297 str_path = os.path.abspath(new)
276 298 sys.path.append(str_path)
277 299 ensure_dir_exists(new)
278 300 readme = os.path.join(new, 'README')
279 301 readme_src = os.path.join(get_ipython_package_dir(), u'config', u'profile', 'README')
280 302 if not os.path.exists(readme) and os.path.exists(readme_src):
281 303 shutil.copy(readme_src, readme)
282 304 for d in ('extensions', 'nbextensions'):
283 305 path = os.path.join(new, d)
284 306 try:
285 307 ensure_dir_exists(path)
286 308 except OSError as e:
287 309 # this will not be EEXIST
288 310 self.log.error("couldn't create path %s: %s", path, e)
289 311 self.log.debug("IPYTHONDIR set to: %s" % new)
290 312
291 313 def load_config_file(self, suppress_errors=IPYTHON_SUPPRESS_CONFIG_ERRORS):
292 314 """Load the config file.
293 315
294 316 By default, errors in loading config are handled, and a warning
295 317 printed on screen. For testing, the suppress_errors option is set
296 318 to False, so errors will make tests fail.
297 319
298 320 `suppress_errors` defaults to `None`, in which case the
299 321 behavior defaults to that of `traitlets.Application`.
300 322
301 323 The default value can be set :
302 324 - to `False` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '0', or 'false' (case insensitive).
303 325 - to `True` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '1' or 'true' (case insensitive).
304 326 - to `None` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '' (empty string) or leaving it unset.
305 327
306 328 Any other values are invalid, and will make IPython exit with a non-zero return code.
307 329 """
308 330
309 331
310 332 self.log.debug("Searching path %s for config files", self.config_file_paths)
311 333 base_config = 'ipython_config.py'
312 334 self.log.debug("Attempting to load config file: %s" %
313 335 base_config)
314 336 try:
315 337 if suppress_errors is not None:
316 338 old_value = Application.raise_config_file_errors
317 339 Application.raise_config_file_errors = not suppress_errors;
318 340 Application.load_config_file(
319 341 self,
320 342 base_config,
321 343 path=self.config_file_paths
322 344 )
323 345 except ConfigFileNotFound:
324 346 # ignore errors loading parent
325 347 self.log.debug("Config file %s not found", base_config)
326 348 pass
327 349 if suppress_errors is not None:
328 350 Application.raise_config_file_errors = old_value
329 351
330 352 for config_file_name in self.config_files:
331 353 if not config_file_name or config_file_name == base_config:
332 354 continue
333 355 self.log.debug("Attempting to load config file: %s" %
334 356 self.config_file_name)
335 357 try:
336 358 Application.load_config_file(
337 359 self,
338 360 config_file_name,
339 361 path=self.config_file_paths
340 362 )
341 363 except ConfigFileNotFound:
342 364 # Only warn if the default config file was NOT being used.
343 365 if config_file_name in self.config_file_specified:
344 366 msg = self.log.warning
345 367 else:
346 368 msg = self.log.debug
347 369 msg("Config file not found, skipping: %s", config_file_name)
348 370 except Exception:
349 371 # For testing purposes.
350 372 if not suppress_errors:
351 373 raise
352 374 self.log.warning("Error loading config file: %s" %
353 375 self.config_file_name, exc_info=True)
354 376
355 377 def init_profile_dir(self):
356 378 """initialize the profile dir"""
357 379 self._in_init_profile_dir = True
358 380 if self.profile_dir is not None:
359 381 # already ran
360 382 return
361 383 if 'ProfileDir.location' not in self.config:
362 384 # location not specified, find by profile name
363 385 try:
364 386 p = ProfileDir.find_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
365 387 except ProfileDirError:
366 388 # not found, maybe create it (always create default profile)
367 389 if self.auto_create or self.profile == 'default':
368 390 try:
369 391 p = ProfileDir.create_profile_dir_by_name(self.ipython_dir, self.profile, self.config)
370 392 except ProfileDirError:
371 393 self.log.fatal("Could not create profile: %r"%self.profile)
372 394 self.exit(1)
373 395 else:
374 396 self.log.info("Created profile dir: %r"%p.location)
375 397 else:
376 398 self.log.fatal("Profile %r not found."%self.profile)
377 399 self.exit(1)
378 400 else:
379 401 self.log.debug("Using existing profile dir: %r"%p.location)
380 402 else:
381 403 location = self.config.ProfileDir.location
382 404 # location is fully specified
383 405 try:
384 406 p = ProfileDir.find_profile_dir(location, self.config)
385 407 except ProfileDirError:
386 408 # not found, maybe create it
387 409 if self.auto_create:
388 410 try:
389 411 p = ProfileDir.create_profile_dir(location, self.config)
390 412 except ProfileDirError:
391 413 self.log.fatal("Could not create profile directory: %r"%location)
392 414 self.exit(1)
393 415 else:
394 416 self.log.debug("Creating new profile dir: %r"%location)
395 417 else:
396 418 self.log.fatal("Profile directory %r not found."%location)
397 419 self.exit(1)
398 420 else:
399 421 self.log.info("Using existing profile dir: %r"%location)
400 422 # if profile_dir is specified explicitly, set profile name
401 423 dir_name = os.path.basename(p.location)
402 424 if dir_name.startswith('profile_'):
403 425 self.profile = dir_name[8:]
404 426
405 427 self.profile_dir = p
406 428 self.config_file_paths.append(p.location)
407 429 self._in_init_profile_dir = False
408 430
409 431 def init_config_files(self):
410 432 """[optionally] copy default config files into profile dir."""
411 433 self.config_file_paths.extend(ENV_CONFIG_DIRS)
412 434 self.config_file_paths.extend(SYSTEM_CONFIG_DIRS)
413 435 # copy config files
414 436 path = Path(self.builtin_profile_dir)
415 437 if self.copy_config_files:
416 438 src = self.profile
417 439
418 440 cfg = self.config_file_name
419 441 if path and (path / cfg).exists():
420 442 self.log.warning(
421 443 "Staging %r from %s into %r [overwrite=%s]"
422 444 % (cfg, src, self.profile_dir.location, self.overwrite)
423 445 )
424 446 self.profile_dir.copy_config_file(cfg, path=path, overwrite=self.overwrite)
425 447 else:
426 448 self.stage_default_config_file()
427 449 else:
428 450 # Still stage *bundled* config files, but not generated ones
429 451 # This is necessary for `ipython profile=sympy` to load the profile
430 452 # on the first go
431 453 files = path.glob("*.py")
432 454 for fullpath in files:
433 455 cfg = fullpath.name
434 456 if self.profile_dir.copy_config_file(cfg, path=path, overwrite=False):
435 457 # file was copied
436 458 self.log.warning("Staging bundled %s from %s into %r"%(
437 459 cfg, self.profile, self.profile_dir.location)
438 460 )
439 461
440 462
441 463 def stage_default_config_file(self):
442 464 """auto generate default config file, and stage it into the profile."""
443 465 s = self.generate_config_file()
444 466 config_file = Path(self.profile_dir.location) / self.config_file_name
445 467 if self.overwrite or not config_file.exists():
446 468 self.log.warning("Generating default config file: %r" % (config_file))
447 469 config_file.write_text(s)
448 470
449 471 @catch_config_error
450 472 def initialize(self, argv=None):
451 473 # don't hook up crash handler before parsing command-line
452 474 self.parse_command_line(argv)
453 475 self.init_crash_handler()
454 476 if self.subapp is not None:
455 477 # stop here if subapp is taking over
456 478 return
457 479 # save a copy of CLI config to re-load after config files
458 480 # so that it has highest priority
459 481 cl_config = deepcopy(self.config)
460 482 self.init_profile_dir()
461 483 self.init_config_files()
462 484 self.load_config_file()
463 485 # enforce cl-opts override configfile opts:
464 486 self.update_config(cl_config)
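The base_aliases/base_flags rewrite at the top of this file exists because traitlets 5 ships its own Application.aliases and Application.flags as plain dicts, which should be inherited and extended rather than silently replaced. The same pattern can be reused by downstream traitlets applications; the sketch below is hypothetical (MyApp and the workdir alias are invented for illustration) and only mirrors the isinstance(..., dict) guard used above.

# Hedged sketch: inherit traitlets' built-in aliases when available, then add your own.
from traitlets import Unicode
from traitlets.config import Application

my_aliases = {}
if isinstance(Application.aliases, dict):
    # traitlets 5 exposes the base aliases (e.g. log-level) as a dict.
    my_aliases.update(Application.aliases)
my_aliases.update({"workdir": "MyApp.workdir"})  # hypothetical application-specific alias

class MyApp(Application):
    name = "myapp"
    aliases = my_aliases
    workdir = Unicode(".", help="Directory to operate in.").tag(config=True)

if __name__ == "__main__":
    MyApp.launch_instance()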
@@ -1,70 +1,70 @@
1 1 # encoding: utf-8
2 2 """
3 3 Autocall capabilities for IPython.core.
4 4
5 5 Authors:
6 6
7 7 * Brian Granger
8 8 * Fernando Perez
9 9 * Thomas Kluyver
10 10
11 11 Notes
12 12 -----
13 13 """
14 14
15 15 #-----------------------------------------------------------------------------
16 16 # Copyright (C) 2008-2011 The IPython Development Team
17 17 #
18 18 # Distributed under the terms of the BSD License. The full license is in
19 19 # the file COPYING, distributed as part of this software.
20 20 #-----------------------------------------------------------------------------
21 21
22 22 #-----------------------------------------------------------------------------
23 23 # Imports
24 24 #-----------------------------------------------------------------------------
25 25
26 26
27 27 #-----------------------------------------------------------------------------
28 28 # Code
29 29 #-----------------------------------------------------------------------------
30 30
31 31 class IPyAutocall(object):
32 32 """ Instances of this class are always autocalled
33 33
34 34 This happens regardless of 'autocall' variable state. Use this to
35 35 develop macro-like mechanisms.
36 36 """
37 37 _ip = None
38 38 rewrite = True
39 39 def __init__(self, ip=None):
40 40 self._ip = ip
41 41
42 42 def set_ip(self, ip):
43 43 """ Will be used to set _ip point to current ipython instance b/f call
44
44
45 45 Override this method if you don't want this to happen.
46
46
47 47 """
48 48 self._ip = ip
49 49
50 50
51 51 class ExitAutocall(IPyAutocall):
52 52 """An autocallable object which will be added to the user namespace so that
53 53 exit, exit(), quit or quit() are all valid ways to close the shell."""
54 54 rewrite = False
55 55
56 56 def __call__(self):
57 57 self._ip.ask_exit()
58 58
59 59 class ZMQExitAutocall(ExitAutocall):
60 60 """Exit IPython. Autocallable, so it needn't be explicitly called.
61 61
62 62 Parameters
63 63 ----------
64 64 keep_kernel : bool
65 65 If True, leave the kernel alive. Otherwise, tell the kernel to exit too
66 66 (default).
67 67 """
68 68 def __call__(self, keep_kernel=False):
69 69 self._ip.keepkernel_on_exit = keep_kernel
70 70 self._ip.ask_exit()
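IPyAutocall above (this is IPython.core.autocall) is documented as the base for objects that are autocalled regardless of the autocall setting; ExitAutocall uses it so that a bare exit or quit closes the shell. A hedged sketch of a user-defined autocallable follows; HelloAutocall and the extension wiring are invented for illustration and are not part of IPython.

# Illustrative custom autocallable in the spirit of ExitAutocall.
from IPython.core.autocall import IPyAutocall

class HelloAutocall(IPyAutocall):
    rewrite = False  # don't rewrite the input line into an explicit call

    def __call__(self) -> None:
        # self._ip is pointed at the running shell before the call (see set_ip above).
        print("hello from", type(self._ip).__name__)

def load_ipython_extension(ip):
    # After `%load_ext <this module>`, entering `hello` on its own triggers __call__.
    ip.user_ns["hello"] = HelloAutocall(ip)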
@@ -1,188 +1,196 @@
1 1 """Compiler tools with improved interactive support.
2 2
3 3 Provides compilation machinery similar to codeop, but with caching support so
4 4 we can provide interactive tracebacks.
5 5
6 6 Authors
7 7 -------
8 8 * Robert Kern
9 9 * Fernando Perez
10 10 * Thomas Kluyver
11 11 """
12 12
13 13 # Note: though it might be more natural to name this module 'compiler', that
14 14 # name is in the stdlib and name collisions with the stdlib tend to produce
15 15 # weird problems (often with third-party tools).
16 16
17 17 #-----------------------------------------------------------------------------
18 18 # Copyright (C) 2010-2011 The IPython Development Team.
19 19 #
20 20 # Distributed under the terms of the BSD License.
21 21 #
22 22 # The full license is in the file COPYING.txt, distributed with this software.
23 23 #-----------------------------------------------------------------------------
24 24
25 25 #-----------------------------------------------------------------------------
26 26 # Imports
27 27 #-----------------------------------------------------------------------------
28 28
29 29 # Stdlib imports
30 30 import __future__
31 31 from ast import PyCF_ONLY_AST
32 32 import codeop
33 33 import functools
34 34 import hashlib
35 35 import linecache
36 36 import operator
37 37 import time
38 38 from contextlib import contextmanager
39 39
40 40 #-----------------------------------------------------------------------------
41 41 # Constants
42 42 #-----------------------------------------------------------------------------
43 43
44 44 # Roughly equal to PyCF_MASK | PyCF_MASK_OBSOLETE as defined in pythonrun.h,
45 45 # this is used as a bitmask to extract future-related code flags.
46 46 PyCF_MASK = functools.reduce(operator.or_,
47 47 (getattr(__future__, fname).compiler_flag
48 48 for fname in __future__.all_feature_names))
49 49
50 50 #-----------------------------------------------------------------------------
51 51 # Local utilities
52 52 #-----------------------------------------------------------------------------
53 53
54 54 def code_name(code, number=0):
55 55 """ Compute a (probably) unique name for code for caching.
56 56
57 57 This now expects code to be unicode.
58 58 """
59 59 hash_digest = hashlib.sha1(code.encode("utf-8")).hexdigest()
60 60 # Include the number and 12 characters of the hash in the name. It's
61 61 # pretty much impossible that in a single session we'll have collisions
62 62 # even with truncated hashes, and the full one makes tracebacks too long
63 63 return '<ipython-input-{0}-{1}>'.format(number, hash_digest[:12])
64 64
65 65 #-----------------------------------------------------------------------------
66 66 # Classes and functions
67 67 #-----------------------------------------------------------------------------
68 68
69 69 class CachingCompiler(codeop.Compile):
70 70 """A compiler that caches code compiled from interactive statements.
71 71 """
72 72
73 73 def __init__(self):
74 74 codeop.Compile.__init__(self)
75 75
76 76 # This is ugly, but it must be done this way to allow multiple
77 77 # simultaneous ipython instances to coexist. Since Python itself
78 78 # directly accesses the data structures in the linecache module, and
79 79 # the cache therein is global, we must work with that data structure.
80 80 # We must hold a reference to the original checkcache routine and call
81 81 # that in our own check_cache() below, but the special IPython cache
82 82 # must also be shared by all IPython instances. If we were to hold
83 83 # separate caches (one in each CachingCompiler instance), any call made
84 84 # by Python itself to linecache.checkcache() would obliterate the
85 85 # cached data from the other IPython instances.
86 86 if not hasattr(linecache, '_ipython_cache'):
87 87 linecache._ipython_cache = {}
88 88 if not hasattr(linecache, '_checkcache_ori'):
89 89 linecache._checkcache_ori = linecache.checkcache
90 90 # Now, we must monkeypatch the linecache directly so that parts of the
91 91 # stdlib that call it outside our control go through our codepath
92 92 # (otherwise we'd lose our tracebacks).
93 93 linecache.checkcache = check_linecache_ipython
94 94
95 # Caching a dictionary { filename: execution_count } for nicely
96 # rendered tracebacks. The filename corresponds to the filename
97 # argument used for the builtins.compile function.
98 self._filename_map = {}
95 99
96 100 def ast_parse(self, source, filename='<unknown>', symbol='exec'):
97 101 """Parse code to an AST with the current compiler flags active.
98 102
99 103 Arguments are exactly the same as ast.parse (in the standard library),
100 104 and are passed to the built-in compile function."""
101 105 return compile(source, filename, symbol, self.flags | PyCF_ONLY_AST, 1)
102 106
103 107 def reset_compiler_flags(self):
104 108 """Reset compiler flags to default state."""
105 109 # This value is copied from codeop.Compile.__init__, so if that ever
106 110 # changes, it will need to be updated.
107 111 self.flags = codeop.PyCF_DONT_IMPLY_DEDENT
108 112
109 113 @property
110 114 def compiler_flags(self):
111 115 """Flags currently active in the compilation process.
112 116 """
113 117 return self.flags
114 118
115 119 def get_code_name(self, raw_code, transformed_code, number):
116 120 """Compute filename given the code, and the cell number.
117 121
118 122 Parameters
119 123 ----------
120 124 raw_code : str
121 The raw cell code.
125 The raw cell code.
122 126 transformed_code : str
123 The executable Python source code to cache and compile.
127 The executable Python source code to cache and compile.
124 128 number : int
125 A number which forms part of the code's name. Used for the execution
126 counter.
129 A number which forms part of the code's name. Used for the execution
130 counter.
127 131
128 132 Returns
129 133 -------
130 134 The computed filename.
131 135 """
132 136 return code_name(transformed_code, number)
133 137
134 138 def cache(self, transformed_code, number=0, raw_code=None):
135 139 """Make a name for a block of code, and cache the code.
136 140
137 141 Parameters
138 142 ----------
139 143 transformed_code : str
140 The executable Python source code to cache and compile.
144 The executable Python source code to cache and compile.
141 145 number : int
142 A number which forms part of the code's name. Used for the execution
143 counter.
146 A number which forms part of the code's name. Used for the execution
147 counter.
144 148 raw_code : str
145 The raw code before transformation, if None, set to `transformed_code`.
149 The raw code before transformation, if None, set to `transformed_code`.
146 150
147 151 Returns
148 152 -------
149 153 The name of the cached code (as a string). Pass this as the filename
150 154 argument to compilation, so that tracebacks are correctly hooked up.
151 155 """
152 156 if raw_code is None:
153 157 raw_code = transformed_code
154 158
155 159 name = self.get_code_name(raw_code, transformed_code, number)
160
161 # Save the execution count
162 self._filename_map[name] = number
163
156 164 entry = (
157 165 len(transformed_code),
158 166 time.time(),
159 167 [line + "\n" for line in transformed_code.splitlines()],
160 168 name,
161 169 )
162 170 linecache.cache[name] = entry
163 171 linecache._ipython_cache[name] = entry
164 172 return name
165 173
166 174 @contextmanager
167 175 def extra_flags(self, flags):
168 176 ## bits that we'll set to 1
169 177 turn_on_bits = ~self.flags & flags
170 178
171 179
172 180 self.flags = self.flags | flags
173 181 try:
174 182 yield
175 183 finally:
176 184 # turn off only the bits we turned on so that something like
177 185 # __future__ that set flags stays.
178 186 self.flags &= ~turn_on_bits
179 187
180 188
181 189 def check_linecache_ipython(*args):
182 190 """Call linecache.checkcache() safely protecting our cached values.
183 191 """
184 192 # First call the original checkcache as intended
185 193 linecache._checkcache_ori(*args)
186 194 # Then, update back the cache with our data, so that tracebacks related
187 195 # to our compiled codes can be produced.
188 196 linecache.cache.update(linecache._ipython_cache)
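The cache() method above stores a cell's source in linecache under a synthetic '<ipython-input-N-...>' filename (and, with this change, also records the execution count in _filename_map), which is what lets tracebacks for interactively compiled code display their source. A hedged end-to-end sketch of that flow, using only the methods shown in this file (IPython.core.compilerop):

# Illustrative flow: cache source, compile it under the cached name, and let a
# traceback resolve the source through linecache.
import linecache
import traceback

from IPython.core.compilerop import CachingCompiler

compiler = CachingCompiler()
src = "x = 1\nraise ValueError('boom')\n"

name = compiler.cache(src, number=3)   # e.g. '<ipython-input-3-...>'
code = compiler(src, name, "exec")     # codeop.Compile.__call__(source, filename, symbol)
print(repr(name), "->", linecache.getline(name, 2).rstrip())

try:
    exec(code, {})
except ValueError:
    traceback.print_exc()              # the traceback shows the cached source line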
@@ -1,2239 +1,2239 @@
1 1 """Completion for IPython.
2 2
3 3 This module started as fork of the rlcompleter module in the Python standard
4 4 library. The original enhancements made to rlcompleter have been sent
5 5 upstream and were accepted as of Python 2.3,
6 6
7 7 This module now supports a wide variety of completion mechanisms, available both
8 8 for normal classic Python code and as a completer for IPython-specific
9 9 syntax like magics.
10 10
11 11 Latex and Unicode completion
12 12 ============================
13 13
14 14 IPython and compatible frontends not only can complete your code, but can help
15 15 you to input a wide range of characters. In particular we allow you to insert
16 16 a unicode character using the tab completion mechanism.
17 17
18 18 Forward latex/unicode completion
19 19 --------------------------------
20 20
21 21 Forward completion allows you to easily type a unicode character using its latex
22 22 name, or unicode long description. To do so, type a backslash followed by the
23 23 relevant name and press tab:
24 24
25 25
26 26 Using latex completion:
27 27
28 28 .. code::
29 29
30 30 \\alpha<tab>
31 31 α
32 32
33 33 or using unicode completion:
34 34
35 35
36 36 .. code::
37 37
38 38 \\GREEK SMALL LETTER ALPHA<tab>
39 39 α
40 40
41 41
42 42 Only valid Python identifiers will complete. Combining characters (like arrow or
43 43 dots) are also available; unlike latex, they need to be put after their
44 44 counterpart, that is to say, `F\\\\vec<tab>` is correct, not `\\\\vec<tab>F`.
45 45
46 46 Some browsers are known to display combining characters incorrectly.
47 47
48 48 Backward latex completion
49 49 -------------------------
50 50
51 51 It is sometimes challenging to know how to type a character; if you are using
52 52 IPython, or any compatible frontend you can prepend backslash to the character
53 53 and press `<tab>` to expand it to its latex form.
54 54
55 55 .. code::
56 56
57 57 \\α<tab>
58 58 \\alpha
59 59
60 60
61 61 Both forward and backward completions can be deactivated by setting the
62 62 ``Completer.backslash_combining_completions`` option to ``False``.
63 63
64 64
65 65 Experimental
66 66 ============
67 67
68 68 Starting with IPython 6.0, this module can make use of the Jedi library to
69 69 generate completions both using static analysis of the code, and dynamically
70 70 inspecting multiple namespaces. Jedi is an autocompletion and static analysis
71 71 for Python. The APIs attached to this new mechanism are unstable and will
72 72 raise unless used in an :any:`provisionalcompleter` context manager.
73 73
74 74 You will find that the following are experimental:
75 75
76 76 - :any:`provisionalcompleter`
77 77 - :any:`IPCompleter.completions`
78 78 - :any:`Completion`
79 79 - :any:`rectify_completions`
80 80
81 81 .. note::
82 82
83 83 better name for :any:`rectify_completions` ?
84 84
85 85 We welcome any feedback on these new APIs, and we also encourage you to try this
86 86 module in debug mode (start IPython with ``--Completer.debug=True``) in order
87 87 to have extra logging information if :any:`jedi` is crashing, or if current
88 88 IPython completer pending deprecations are returning results not yet handled
89 89 by :any:`jedi`
90 90
91 91 Using Jedi for tab completion allows snippets like the following to work without
92 92 having to execute any code:
93 93
94 94 >>> myvar = ['hello', 42]
95 95 ... myvar[1].bi<tab>
96 96
97 97 Tab completion will be able to infer that ``myvar[1]`` is a real number without
98 98 executing any code unlike the previously available ``IPCompleter.greedy``
99 99 option.
100 100
101 101 Be sure to update :any:`jedi` to the latest stable version or to try the
102 102 current development version to get better completions.
103 103 """
104 104
105 105
106 106 # Copyright (c) IPython Development Team.
107 107 # Distributed under the terms of the Modified BSD License.
108 108 #
109 109 # Some of this code originated from rlcompleter in the Python standard library
110 110 # Copyright (C) 2001 Python Software Foundation, www.python.org
111 111
112 112
113 113 import builtins as builtin_mod
114 114 import glob
115 115 import inspect
116 116 import itertools
117 117 import keyword
118 118 import os
119 119 import re
120 120 import string
121 121 import sys
122 122 import time
123 123 import unicodedata
124 124 import uuid
125 125 import warnings
126 126 from contextlib import contextmanager
127 127 from importlib import import_module
128 128 from types import SimpleNamespace
129 129 from typing import Iterable, Iterator, List, Tuple, Union, Any, Sequence, Dict, NamedTuple, Pattern, Optional
130 130
131 131 from IPython.core.error import TryNext
132 132 from IPython.core.inputtransformer2 import ESC_MAGIC
133 133 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
134 134 from IPython.core.oinspect import InspectColors
135 135 from IPython.utils import generics
136 136 from IPython.utils.dir2 import dir2, get_real_method
137 137 from IPython.utils.path import ensure_dir_exists
138 138 from IPython.utils.process import arg_split
139 139 from traitlets import Bool, Enum, Int, List as ListTrait, Unicode, default, observe
140 140 from traitlets.config.configurable import Configurable
141 141
142 142 import __main__
143 143
144 144 # skip module docstests
145 145 skip_doctest = True
146 146
147 147 try:
148 148 import jedi
149 149 jedi.settings.case_insensitive_completion = False
150 150 import jedi.api.helpers
151 151 import jedi.api.classes
152 152 JEDI_INSTALLED = True
153 153 except ImportError:
154 154 JEDI_INSTALLED = False
155 155 #-----------------------------------------------------------------------------
156 156 # Globals
157 157 #-----------------------------------------------------------------------------
158 158
159 159 # ranges where we have most of the valid unicode names. We could be finer
160 160 # grained, but is it worth it for performance? While unicode has characters in the
161 161 # range 0, 0x110000, we seem to have names for about 10% of those (131808 as I
162 162 # write this). With the ranges below we cover them all, with a density of ~67%;
163 163 # the biggest next gap we could consider only adds about 1% density and there are 600
164 164 # gaps that would need hard coding.
165 165 _UNICODE_RANGES = [(32, 0x3134b), (0xe0001, 0xe01f0)]
166 166
167 167 # Public API
168 168 __all__ = ['Completer','IPCompleter']
169 169
170 170 if sys.platform == 'win32':
171 171 PROTECTABLES = ' '
172 172 else:
173 173 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
174 174
175 175 # Protect against returning an enormous number of completions which the frontend
176 176 # may have trouble processing.
177 177 MATCHES_LIMIT = 500
178 178
179 179 _deprecation_readline_sentinel = object()
180 180
181 181
182 182 class ProvisionalCompleterWarning(FutureWarning):
183 183 """
184 184 Exception raise by an experimental feature in this module.
185 185
186 186 Wrap code in :any:`provisionalcompleter` context manager if you
187 187 are certain you want to use an unstable feature.
188 188 """
189 189 pass
190 190
191 191 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
192 192
193 193 @contextmanager
194 194 def provisionalcompleter(action='ignore'):
195 195 """
196 196 This context manager has to be used in any place where unstable completer
197 197 behavior and API may be called.
198 198
199 199 >>> with provisionalcompleter():
200 200 ... completer.do_experimental_things() # works
201 201
202 202 >>> completer.do_experimental_things() # raises.
203 203
204 204 .. note::
205 205
206 206 Unstable
207 207
208 208 By using this context manager you agree that the API in use may change
209 209 without warning, and that you won't complain if they do so.
210 210
211 211 You also understand that, if the API is not to your liking, you should report
212 212 a bug to explain your use case upstream.
213 213
214 214 We'll be happy to get your feedback, feature requests, and improvements on
215 215 any of the unstable APIs!
216 216 """
217 217 with warnings.catch_warnings():
218 218 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
219 219 yield
220 220
221 221
222 222 def has_open_quotes(s):
223 223 """Return whether a string has open quotes.
224 224
225 225 This simply counts whether the number of quote characters of either type in
226 226 the string is odd.
227 227
228 228 Returns
229 229 -------
230 230 If there is an open quote, the quote character is returned. Else, return
231 231 False.
232 232 """
233 233 # We check " first, then ', so complex cases with nested quotes will get
234 234 # the " to take precedence.
235 235 if s.count('"') % 2:
236 236 return '"'
237 237 elif s.count("'") % 2:
238 238 return "'"
239 239 else:
240 240 return False
241 241
242 242
243 243 def protect_filename(s, protectables=PROTECTABLES):
244 244 """Escape a string to protect certain characters."""
245 245 if set(s) & set(protectables):
246 246 if sys.platform == "win32":
247 247 return '"' + s + '"'
248 248 else:
249 249 return "".join(("\\" + c if c in protectables else c) for c in s)
250 250 else:
251 251 return s
252 252
253 253
254 254 def expand_user(path:str) -> Tuple[str, bool, str]:
255 255 """Expand ``~``-style usernames in strings.
256 256
257 257 This is similar to :func:`os.path.expanduser`, but it computes and returns
258 258 extra information that will be useful if the input was being used in
259 259 computing completions, and you wish to return the completions with the
260 260 original '~' instead of its expanded value.
261 261
262 262 Parameters
263 263 ----------
264 264 path : str
265 265 String to be expanded. If no ~ is present, the output is the same as the
266 266 input.
267 267
268 268 Returns
269 269 -------
270 270 newpath : str
271 271 Result of ~ expansion in the input path.
272 272 tilde_expand : bool
273 273 Whether any expansion was performed or not.
274 274 tilde_val : str
275 275 The value that ~ was replaced with.
276 276 """
277 277 # Default values
278 278 tilde_expand = False
279 279 tilde_val = ''
280 280 newpath = path
281 281
282 282 if path.startswith('~'):
283 283 tilde_expand = True
284 284 rest = len(path)-1
285 285 newpath = os.path.expanduser(path)
286 286 if rest:
287 287 tilde_val = newpath[:-rest]
288 288 else:
289 289 tilde_val = newpath
290 290
291 291 return newpath, tilde_expand, tilde_val
292 292
293 293
294 294 def compress_user(path:str, tilde_expand:bool, tilde_val:str) -> str:
295 295 """Does the opposite of expand_user, with its outputs.
296 296 """
297 297 if tilde_expand:
298 298 return path.replace(tilde_val, '~')
299 299 else:
300 300 return path
301 301
302 302
303 303 def completions_sorting_key(word):
304 304 """key for sorting completions
305 305
306 306 This does several things:
307 307
308 308 - Demote any completions starting with underscores to the end
309 309 - Insert any %magic and %%cellmagic completions in the alphabetical order
310 310 by their name
311 311 """
312 312 prio1, prio2 = 0, 0
313 313
314 314 if word.startswith('__'):
315 315 prio1 = 2
316 316 elif word.startswith('_'):
317 317 prio1 = 1
318 318
319 319 if word.endswith('='):
320 320 prio1 = -1
321 321
322 322 if word.startswith('%%'):
323 323 # If there's another % in there, this is something else, so leave it alone
324 324 if not "%" in word[2:]:
325 325 word = word[2:]
326 326 prio2 = 2
327 327 elif word.startswith('%'):
328 328 if not "%" in word[1:]:
329 329 word = word[1:]
330 330 prio2 = 1
331 331
332 332 return prio1, word, prio2
333 333
334 334
335 335 class _FakeJediCompletion:
336 336 """
337 337 This is a workaround to communicate to the UI that Jedi has crashed and to
338 338 report a bug. Will be used only if :any:`IPCompleter.debug` is set to true.
339 339
340 340 Added in IPython 6.0 so should likely be removed for 7.0
341 341
342 342 """
343 343
344 344 def __init__(self, name):
345 345
346 346 self.name = name
347 347 self.complete = name
348 348 self.type = 'crashed'
349 349 self.name_with_symbols = name
350 350 self.signature = ''
351 351 self._origin = 'fake'
352 352
353 353 def __repr__(self):
354 354 return '<Fake completion object jedi has crashed>'
355 355
356 356
357 357 class Completion:
358 358 """
359 359 Completion object used and returned by IPython completers.
360 360
361 361 .. warning::
362 362
363 363 Unstable
364 364
365 365 This function is unstable, API may change without warning.
366 366 It will also raise unless used in a proper context manager.
367 367
368 368 This acts as a middle-ground :any:`Completion` object between the
369 369 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
370 370 object. While Jedi needs a lot of information about the evaluator and how the
371 371 code should be run/inspected, PromptToolkit (and other frontends) mostly
372 372 need user-facing information.
373 373
374 374 - Which range should be replaced by what.
375 375 - Some metadata (like the completion type), or meta information to be displayed to
376 376 the user.
377 377
378 378 For debugging purposes we can also store the origin of the completion (``jedi``,
379 379 ``IPython.python_matches``, ``IPython.magics_matches``...).
380 380 """
381 381
382 382 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
383 383
384 384 def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin='', signature='') -> None:
385 385 warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
386 386 "It may change without warnings. "
387 387 "Use in corresponding context manager.",
388 388 category=ProvisionalCompleterWarning, stacklevel=2)
389 389
390 390 self.start = start
391 391 self.end = end
392 392 self.text = text
393 393 self.type = type
394 394 self.signature = signature
395 395 self._origin = _origin
396 396
397 397 def __repr__(self):
398 398 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
399 399 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
400 400
401 401 def __eq__(self, other) -> bool:
402 402 """
403 403 Equality and hash do not hash the type (as some completers may not be
404 404 able to infer the type), but are used to (partially) de-duplicate
405 405 completions.
406 406
407 407 Completely de-duplicating completions is a bit trickier than just
408 408 comparing them, as it depends on the surrounding text, which Completions
409 409 are not aware of.
410 410 """
411 411 return self.start == other.start and \
412 412 self.end == other.end and \
413 413 self.text == other.text
414 414
415 415 def __hash__(self):
416 416 return hash((self.start, self.end, self.text))
417 417
418 418
419 419 _IC = Iterable[Completion]
420 420
421 421
422 422 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
423 423 """
424 424 Deduplicate a set of completions.
425 425
426 426 .. warning::
427 427
428 428 Unstable
429 429
430 430 This function is unstable, API may change without warning.
431 431
432 432 Parameters
433 433 ----------
434 text: str
434 text : str
435 435 text that should be completed.
436 completions: Iterator[Completion]
436 completions : Iterator[Completion]
437 437 iterator over the completions to deduplicate
438 438
439 439 Yields
440 440 ------
441 441 `Completion` objects
442 442 Completions coming from multiple sources may be different but end up having
443 443 the same effect when applied to ``text``. If this is the case, this will
444 444 consider the completions equal and only emit the first one encountered.
445 445 This is not folded into `completions()` yet, for debugging purposes and to
446 446 detect when the IPython completer returns things that Jedi does not, but it
447 447 should be at some point.
448 448 """
449 449 completions = list(completions)
450 450 if not completions:
451 451 return
452 452
453 453 new_start = min(c.start for c in completions)
454 454 new_end = max(c.end for c in completions)
455 455
456 456 seen = set()
457 457 for c in completions:
458 458 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
459 459 if new_text not in seen:
460 460 yield c
461 461 seen.add(new_text)
462 462
463 463
464 464 def rectify_completions(text: str, completions: _IC, *, _debug=False)->_IC:
465 465 """
466 466 Rectify a set of completions to all have the same ``start`` and ``end``
467 467
468 468 .. warning::
469 469
470 470 Unstable
471 471
472 472 This function is unstable, API may change without warning.
473 473 It will also raise unless used in a proper context manager.
474 474
475 475 Parameters
476 476 ----------
477 text: str
477 text : str
478 478 text that should be completed.
479 completions: Iterator[Completion]
479 completions : Iterator[Completion]
480 480 iterator over the completions to rectify
481 481
482 482 Notes
483 483 -----
484 484 :any:`jedi.api.classes.Completion` objects returned by Jedi may not have the same start and end, though
485 485 the Jupyter Protocol requires them to. This will readjust
486 486 the completions to have the same ``start`` and ``end`` by padding both
487 487 extremities with surrounding text.
488 488
489 489 During stabilisation this should support a ``_debug`` option to log which
490 490 completions are returned by the IPython completer but not found by Jedi, in
491 491 order to make upstream bug reports.
492 492 """
493 493 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
494 494 "It may change without warnings. "
495 495 "Use in corresponding context manager.",
496 496 category=ProvisionalCompleterWarning, stacklevel=2)
497 497
498 498 completions = list(completions)
499 499 if not completions:
500 500 return
501 501 starts = (c.start for c in completions)
502 502 ends = (c.end for c in completions)
503 503
504 504 new_start = min(starts)
505 505 new_end = max(ends)
506 506
507 507 seen_jedi = set()
508 508 seen_python_matches = set()
509 509 for c in completions:
510 510 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
511 511 if c._origin == 'jedi':
512 512 seen_jedi.add(new_text)
513 513 elif c._origin == 'IPCompleter.python_matches':
514 514 seen_python_matches.add(new_text)
515 515 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
516 516 diff = seen_python_matches.difference(seen_jedi)
517 517 if diff and _debug:
518 518 print('IPython.python matches have extras:', diff)
519 519
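A minimal sketch of how rectify_completions is meant to be consumed from a live session, assuming a running IPython instance where get_ipython() is available:

    from IPython.core.completer import provisionalcompleter, rectify_completions

    ip = get_ipython()                 # assumes an interactive IPython session
    code, cursor = "print(ab", 8       # cursor sits right after "ab"

    with provisionalcompleter():       # opt in to the unstable Completion API
        raw = ip.Completer.completions(code, cursor)
        # After rectification every Completion shares the same start/end,
        # which is what the Jupyter protocol expects.
        completions = list(rectify_completions(code, raw))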
520 520
521 521 if sys.platform == 'win32':
522 522 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
523 523 else:
524 524 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
525 525
526 526 GREEDY_DELIMS = ' =\r\n'
527 527
528 528
529 529 class CompletionSplitter(object):
530 530 """An object to split an input line in a manner similar to readline.
531 531
532 532 By having our own implementation, we can expose readline-like completion in
533 533 a uniform manner to all frontends. This object only needs to be given the
534 534 line of text to be split and the cursor position on said line, and it
535 535 returns the 'word' to be completed at the cursor after splitting the
536 536 entire line.
537 537
538 538 What characters are used as splitting delimiters can be controlled by
539 539 setting the ``delims`` attribute (this is a property that internally
540 540 automatically builds the necessary regular expression)"""
541 541
542 542 # Private interface
543 543
544 544 # A string of delimiter characters. The default value makes sense for
545 545 # IPython's most typical usage patterns.
546 546 _delims = DELIMS
547 547
548 548 # The expression (a normal string) to be compiled into a regular expression
549 549 # for actual splitting. We store it as an attribute mostly for ease of
550 550 # debugging, since this type of code can be so tricky to debug.
551 551 _delim_expr = None
552 552
553 553 # The regular expression that does the actual splitting
554 554 _delim_re = None
555 555
556 556 def __init__(self, delims=None):
557 557 delims = CompletionSplitter._delims if delims is None else delims
558 558 self.delims = delims
559 559
560 560 @property
561 561 def delims(self):
562 562 """Return the string of delimiter characters."""
563 563 return self._delims
564 564
565 565 @delims.setter
566 566 def delims(self, delims):
567 567 """Set the delimiters for line splitting."""
568 568 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
569 569 self._delim_re = re.compile(expr)
570 570 self._delims = delims
571 571 self._delim_expr = expr
572 572
573 573 def split_line(self, line, cursor_pos=None):
574 574 """Split a line of text with a cursor at the given position.
575 575 """
576 576 l = line if cursor_pos is None else line[:cursor_pos]
577 577 return self._delim_re.split(l)[-1]
578 578
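For instance, a quick sketch with the default delimiters: the splitter cuts at brackets, operators, and whitespace, but keeps dotted names together:

    from IPython.core.completer import CompletionSplitter

    splitter = CompletionSplitter()
    assert splitter.split_line("print(os.pa") == "os.pa"             # cut at '('
    assert splitter.split_line("d['key'] = obj.attr") == "obj.attr"  # '.' is not a delimiter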
579 579
580 580
581 581 class Completer(Configurable):
582 582
583 583 greedy = Bool(False,
584 584 help="""Activate greedy completion
585 585 PENDING DEPRECATION. This is now mostly taken care of with Jedi.
586 586
587 587 This will enable completion on elements of lists, results of function calls, etc.,
588 588 but can be unsafe because the code is actually evaluated on TAB.
589 589 """
590 590 ).tag(config=True)
591 591
592 592 use_jedi = Bool(default_value=JEDI_INSTALLED,
593 593 help="Experimental: Use Jedi to generate autocompletions. "
594 594 "Default to True if jedi is installed.").tag(config=True)
595 595
596 596 jedi_compute_type_timeout = Int(default_value=400,
597 597 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
598 598 Set to 0 to stop computing types. A non-zero value lower than 100ms may hurt
599 599 performance by preventing jedi from building its cache.
600 600 """).tag(config=True)
601 601
602 602 debug = Bool(default_value=False,
603 603 help='Enable debug for the Completer. Mostly print extra '
604 604 'information for experimental jedi integration.')\
605 605 .tag(config=True)
606 606
607 607 backslash_combining_completions = Bool(True,
608 608 help="Enable unicode completions, e.g. \\alpha<tab> . "
609 609 "Includes completion of latex commands, unicode names, and expanding "
610 610 "unicode characters back to latex commands.").tag(config=True)
611 611
612 612
613 613
614 614 def __init__(self, namespace=None, global_namespace=None, **kwargs):
615 615 """Create a new completer for the command line.
616 616
617 617 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
618 618
619 619 If unspecified, the default namespace where completions are performed
620 620 is __main__ (technically, __main__.__dict__). Namespaces should be
621 621 given as dictionaries.
622 622
623 623 An optional second namespace can be given. This allows the completer
624 624 to handle cases where both the local and global scopes need to be
625 625 distinguished.
626 626 """
627 627
628 628 # Don't bind to namespace quite yet, but flag whether the user wants a
629 629 # specific namespace or to use __main__.__dict__. This will allow us
630 630 # to bind to __main__.__dict__ at completion time, not now.
631 631 if namespace is None:
632 632 self.use_main_ns = True
633 633 else:
634 634 self.use_main_ns = False
635 635 self.namespace = namespace
636 636
637 637 # The global namespace, if given, can be bound directly
638 638 if global_namespace is None:
639 639 self.global_namespace = {}
640 640 else:
641 641 self.global_namespace = global_namespace
642 642
643 643 self.custom_matchers = []
644 644
645 645 super(Completer, self).__init__(**kwargs)
646 646
647 647 def complete(self, text, state):
648 648 """Return the next possible completion for 'text'.
649 649
650 650 This is called successively with state == 0, 1, 2, ... until it
651 651 returns None. The completion should begin with 'text'.
652 652
653 653 """
654 654 if self.use_main_ns:
655 655 self.namespace = __main__.__dict__
656 656
657 657 if state == 0:
658 658 if "." in text:
659 659 self.matches = self.attr_matches(text)
660 660 else:
661 661 self.matches = self.global_matches(text)
662 662 try:
663 663 return self.matches[state]
664 664 except IndexError:
665 665 return None
666 666
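A minimal illustration of the readline-style protocol implemented by complete(): state 0 computes the match list, later states walk through it until None signals exhaustion:

    from IPython.core.completer import Completer

    c = Completer(namespace={"my_value": 1, "my_variable": 2})
    first = c.complete("my_v", 0)         # computes and returns the first match
    second = c.complete("my_v", 1)        # returns the next cached match
    assert {first, second} == {"my_value", "my_variable"}
    assert c.complete("my_v", 2) is None  # no more matches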
667 667 def global_matches(self, text):
668 668 """Compute matches when text is a simple name.
669 669
670 670 Return a list of all keywords, built-in functions and names currently
671 671 defined in self.namespace or self.global_namespace that match.
672 672
673 673 """
674 674 matches = []
675 675 match_append = matches.append
676 676 n = len(text)
677 677 for lst in [keyword.kwlist,
678 678 builtin_mod.__dict__.keys(),
679 679 self.namespace.keys(),
680 680 self.global_namespace.keys()]:
681 681 for word in lst:
682 682 if word[:n] == text and word != "__builtins__":
683 683 match_append(word)
684 684
685 685 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
686 686 for lst in [self.namespace.keys(),
687 687 self.global_namespace.keys()]:
688 688 shortened = {"_".join([sub[0] for sub in word.split('_')]) : word
689 689 for word in lst if snake_case_re.match(word)}
690 690 for word in shortened.keys():
691 691 if word[:n] == text and word != "__builtins__":
692 692 match_append(shortened[word])
693 693 return matches
694 694
695 695 def attr_matches(self, text):
696 696 """Compute matches when text contains a dot.
697 697
698 698 Assuming the text is of the form NAME.NAME....[NAME], and is
699 699 evaluatable in self.namespace or self.global_namespace, it will be
700 700 evaluated and its attributes (as revealed by dir()) are used as
701 701 possible completions. (For class instances, class members are
702 702 also considered.)
703 703
704 704 WARNING: this can still invoke arbitrary C code, if an object
705 705 with a __getattr__ hook is evaluated.
706 706
707 707 """
708 708
709 709 # Another option, seems to work great. Catches things like ''.<tab>
710 710 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
711 711
712 712 if m:
713 713 expr, attr = m.group(1, 3)
714 714 elif self.greedy:
715 715 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
716 716 if not m2:
717 717 return []
718 718 expr, attr = m2.group(1,2)
719 719 else:
720 720 return []
721 721
722 722 try:
723 723 obj = eval(expr, self.namespace)
724 724 except:
725 725 try:
726 726 obj = eval(expr, self.global_namespace)
727 727 except:
728 728 return []
729 729
730 730 if self.limit_to__all__ and hasattr(obj, '__all__'):
731 731 words = get__all__entries(obj)
732 732 else:
733 733 words = dir2(obj)
734 734
735 735 try:
736 736 words = generics.complete_object(obj, words)
737 737 except TryNext:
738 738 pass
739 739 except AssertionError:
740 740 raise
741 741 except Exception:
742 742 # Silence errors from completion function
743 743 #raise # dbg
744 744 pass
745 745 # Build match list to return
746 746 n = len(attr)
747 747 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
748 748
749 749
750 750 def get__all__entries(obj):
751 751 """returns the strings in the __all__ attribute"""
752 752 try:
753 753 words = getattr(obj, '__all__')
754 754 except:
755 755 return []
756 756
757 757 return [w for w in words if isinstance(w, str)]
758 758
759 759
760 760 def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes]]]], prefix: str, delims: str,
761 761 extra_prefix: Optional[Tuple[str, bytes]]=None) -> Tuple[str, int, List[str]]:
762 762 """Used by dict_key_matches, matching the prefix to a list of keys
763 763
764 764 Parameters
765 765 ----------
766 keys:
766 keys
767 767 list of keys in dictionary currently being completed.
768 prefix:
768 prefix
769 769 Part of the text already typed by the user. E.g. `mydict[b'fo`
770 delims:
770 delims
771 771 String of delimiters to consider when finding the current key.
772 extra_prefix: optional
772 extra_prefix : optional
773 773 Part of the text already typed in multi-key index cases. E.g. for
774 774 `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.
775 775
776 776 Returns
777 777 -------
778 778 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
779 779 ``quote`` being the quote that needs to be used to close the current string,
780 780 ``token_start`` the position where the replacement should start occurring, and
781 781 ``matched`` a list of replacement/completion strings.
782 782
783 783 """
784 784 prefix_tuple = extra_prefix if extra_prefix else ()
785 785 Nprefix = len(prefix_tuple)
786 786 def filter_prefix_tuple(key):
787 787 # Reject too short keys
788 788 if len(key) <= Nprefix:
789 789 return False
790 790 # Reject keys with non str/bytes in it
791 791 for k in key:
792 792 if not isinstance(k, (str, bytes)):
793 793 return False
794 794 # Reject keys that do not match the prefix
795 795 for k, pt in zip(key, prefix_tuple):
796 796 if k != pt:
797 797 return False
798 798 # All checks passed!
799 799 return True
800 800
801 801 filtered_keys:List[Union[str,bytes]] = []
802 802 def _add_to_filtered_keys(key):
803 803 if isinstance(key, (str, bytes)):
804 804 filtered_keys.append(key)
805 805
806 806 for k in keys:
807 807 if isinstance(k, tuple):
808 808 if filter_prefix_tuple(k):
809 809 _add_to_filtered_keys(k[Nprefix])
810 810 else:
811 811 _add_to_filtered_keys(k)
812 812
813 813 if not prefix:
814 814 return '', 0, [repr(k) for k in filtered_keys]
815 815 quote_match = re.search('["\']', prefix)
816 816 assert quote_match is not None # silence mypy
817 817 quote = quote_match.group()
818 818 try:
819 819 prefix_str = eval(prefix + quote, {})
820 820 except Exception:
821 821 return '', 0, []
822 822
823 823 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
824 824 token_match = re.search(pattern, prefix, re.UNICODE)
825 825 assert token_match is not None # silence mypy
826 826 token_start = token_match.start()
827 827 token_prefix = token_match.group()
828 828
829 829 matched:List[str] = []
830 830 for key in filtered_keys:
831 831 try:
832 832 if not key.startswith(prefix_str):
833 833 continue
834 834 except (AttributeError, TypeError, UnicodeError):
835 835 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
836 836 continue
837 837
838 838 # reformat remainder of key to begin with prefix
839 839 rem = key[len(prefix_str):]
840 840 # force repr wrapped in '
841 841 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
842 842 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
843 843 if quote == '"':
844 844 # The entered prefix is quoted with ",
845 845 # but the match is quoted with '.
846 846 # A contained " hence needs escaping for comparison:
847 847 rem_repr = rem_repr.replace('"', '\\"')
848 848
849 849 # then reinsert prefix from start of token
850 850 matched.append('%s%s' % (token_prefix, rem_repr))
851 851 return quote, token_start, matched
852 852
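Roughly, a sketch of how this helper is used (values shown are indicative and assume the module-level DELIMS constant):

    from IPython.core.completer import DELIMS, match_dict_keys

    # Completing   mydict['fo   against a mixture of keys:
    quote, token_start, matched = match_dict_keys(["foobar", "foobaz", 42], "'fo", DELIMS)
    # quote   == "'"                   -- the quote that still needs closing
    # matched == ["foobar", "foobaz"]  -- non str/bytes keys such as 42 are dropped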
853 853
854 854 def cursor_to_position(text:str, line:int, column:int)->int:
855 855 """
856 856 Convert the (line,column) position of the cursor in text to an offset in a
857 857 string.
858 858
859 859 Parameters
860 860 ----------
861 861 text : str
862 862 The text in which to calculate the cursor offset
863 863 line : int
864 864 Line of the cursor; 0-indexed
865 865 column : int
866 866 Column of the cursor 0-indexed
867 867
868 868 Returns
869 869 -------
870 870 Position of the cursor in ``text``, 0-indexed.
871 871
872 872 See Also
873 873 --------
874 874 position_to_cursor : reciprocal of this function
875 875
876 876 """
877 877 lines = text.split('\n')
878 878 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
879 879
880 880 return sum(len(l) + 1 for l in lines[:line]) + column
881 881
882 882 def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
883 883 """
884 884 Convert the position of the cursor in text (0-indexed) to a line
885 885 number (0-indexed) and a column number (0-indexed) pair.
886 886
887 887 Position should be a valid position in ``text``.
888 888
889 889 Parameters
890 890 ----------
891 891 text : str
892 892 The text in which to calculate the cursor offset
893 893 offset : int
894 894 Position of the cursor in ``text``, 0-indexed.
895 895
896 896 Returns
897 897 -------
898 898 (line, column) : (int, int)
899 899 Line of the cursor; 0-indexed, column of the cursor 0-indexed
900 900
901 901 See Also
902 902 --------
903 903 cursor_to_position : reciprocal of this function
904 904
905 905 """
906 906
907 907 assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
908 908
909 909 before = text[:offset]
910 910 blines = before.split('\n') # ! splitlines would trim a trailing \n, hence split
911 911 line = before.count('\n')
912 912 col = len(blines[-1])
913 913 return line, col
914 914
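The two converters above are inverses of one another; a quick sketch:

    from IPython.core.completer import cursor_to_position, position_to_cursor

    text = "first line\nsecond line"
    offset = cursor_to_position(text, 1, 3)   # line 1, column 3 -> the 'o' of "second"
    assert text[offset] == "o"
    assert position_to_cursor(text, offset) == (1, 3)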
915 915
916 916 def _safe_isinstance(obj, module, class_name):
917 917 """Checks if obj is an instance of module.class_name if loaded
918 918 """
919 919 return (module in sys.modules and
920 920 isinstance(obj, getattr(import_module(module), class_name)))
921 921
922 922 def back_unicode_name_matches(text:str) -> Tuple[str, Sequence[str]]:
923 923 """Match Unicode characters back to Unicode name
924 924
925 925 This does ``☃`` -> ``\\snowman``
926 926
927 927 Note that snowman is not a valid python3 combining character, but it will be expanded;
928 928 it will just not be recombined back into the snowman character by the completion machinery.
929 929
930 930 This will also not back-complete standard escape sequences like \\n, \\b ...
931 931
932 932 Returns
933 933 -------
934 934
935 935 Return a tuple with two elements:
936 936
937 937 - The Unicode character that was matched (preceded by a backslash), or an
938 938 empty string,
939 939 - a sequence (of 1) with the name of the matched Unicode character, preceded by
940 940 a backslash, or empty if no match.
941 941
942 942 """
943 943 if len(text)<2:
944 944 return '', ()
945 945 maybe_slash = text[-2]
946 946 if maybe_slash != '\\':
947 947 return '', ()
948 948
949 949 char = text[-1]
950 950 # no expand on quote for completion in strings.
951 951 # nor backcomplete standard ascii keys
952 952 if char in string.ascii_letters or char in ('"',"'"):
953 953 return '', ()
954 954 try :
955 955 unic = unicodedata.name(char)
956 956 return '\\'+char,('\\'+unic,)
957 957 except KeyError:
958 958 pass
959 959 return '', ()
960 960
961 961 def back_latex_name_matches(text:str) -> Tuple[str, Sequence[str]] :
962 962 """Match latex characters back to unicode name
963 963
964 964 This does ``\\ℵ`` -> ``\\aleph``
965 965
966 966 """
967 967 if len(text)<2:
968 968 return '', ()
969 969 maybe_slash = text[-2]
970 970 if maybe_slash != '\\':
971 971 return '', ()
972 972
973 973
974 974 char = text[-1]
975 975 # no expand on quote for completion in strings.
976 976 # nor backcomplete standard ascii keys
977 977 if char in string.ascii_letters or char in ('"',"'"):
978 978 return '', ()
979 979 try :
980 980 latex = reverse_latex_symbol[char]
981 981 # '\\' replace the \ as well
982 982 return '\\'+char,[latex]
983 983 except KeyError:
984 984 pass
985 985 return '', ()
986 986
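A short sketch of both back-completers, mirroring the snowman and aleph examples in the docstrings above: each maps a literal character back to the backslash-escaped name understood by the forward completers:

    from IPython.core.completer import (back_latex_name_matches,
                                        back_unicode_name_matches)

    assert back_unicode_name_matches("\\☃") == ("\\☃", ("\\SNOWMAN",))
    assert back_latex_name_matches("\\ℵ") == ("\\ℵ", ["\\aleph"])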
987 987
988 988 def _formatparamchildren(parameter) -> str:
989 989 """
990 990 Get parameter name and value from Jedi Private API
991 991
992 992 Jedi does not expose a simple way to get `param=value` from its API.
993 993
994 994 Parameters
995 995 ----------
996 parameter:
996 parameter
997 997 Jedi's function `Param`
998 998
999 999 Returns
1000 1000 -------
1001 1001 A string like 'a', 'b=1', '*args', '**kwargs'
1002 1002
1003 1003 """
1004 1004 description = parameter.description
1005 1005 if not description.startswith('param '):
1006 1006 raise ValueError('Jedi function parameter description has changed format. '
1007 1007 'Expected "param ...", found %r.' % description)
1008 1008 return description[6:]
1009 1009
1010 1010 def _make_signature(completion)-> str:
1011 1011 """
1012 1012 Make the signature from a jedi completion
1013 1013
1014 1014 Parameters
1015 1015 ----------
1016 completion: jedi.Completion
1016 completion : jedi.Completion
1017 1017 object does not complete a function type
1018 1018
1019 1019 Returns
1020 1020 -------
1021 1021 a string consisting of the function signature, with the parenthesis but
1022 1022 without the function name. example:
1023 1023 `(a, *args, b=1, **kwargs)`
1024 1024
1025 1025 """
1026 1026
1027 1027 # it looks like this might work on jedi 0.17
1028 1028 if hasattr(completion, 'get_signatures'):
1029 1029 signatures = completion.get_signatures()
1030 1030 if not signatures:
1031 1031 return '(?)'
1032 1032
1033 1033 c0 = completion.get_signatures()[0]
1034 1034 return '('+c0.to_string().split('(', maxsplit=1)[1]
1035 1035
1036 1036 return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
1037 1037 for p in signature.defined_names()) if f])
1038 1038
1039 1039
1040 1040 class _CompleteResult(NamedTuple):
1041 1041 matched_text : str
1042 1042 matches: Sequence[str]
1043 1043 matches_origin: Sequence[str]
1044 1044 jedi_matches: Any
1045 1045
1046 1046
1047 1047 class IPCompleter(Completer):
1048 1048 """Extension of the completer class with IPython-specific features"""
1049 1049
1050 1050 __dict_key_regexps: Optional[Dict[bool,Pattern]] = None
1051 1051
1052 1052 @observe('greedy')
1053 1053 def _greedy_changed(self, change):
1054 1054 """update the splitter and readline delims when greedy is changed"""
1055 1055 if change['new']:
1056 1056 self.splitter.delims = GREEDY_DELIMS
1057 1057 else:
1058 1058 self.splitter.delims = DELIMS
1059 1059
1060 1060 dict_keys_only = Bool(False,
1061 1061 help="""Whether to show dict key matches only""")
1062 1062
1063 1063 merge_completions = Bool(True,
1064 1064 help="""Whether to merge completion results into a single list
1065 1065
1066 1066 If False, only the completion results from the first non-empty
1067 1067 completer will be returned.
1068 1068 """
1069 1069 ).tag(config=True)
1070 1070 omit__names = Enum((0,1,2), default_value=2,
1071 1071 help="""Instruct the completer to omit private method names
1072 1072
1073 1073 Specifically, when completing on ``object.<tab>``.
1074 1074
1075 1075 When 2 [default]: all names that start with '_' will be excluded.
1076 1076
1077 1077 When 1: all 'magic' names (``__foo__``) will be excluded.
1078 1078
1079 1079 When 0: nothing will be excluded.
1080 1080 """
1081 1081 ).tag(config=True)
1082 1082 limit_to__all__ = Bool(False,
1083 1083 help="""
1084 1084 DEPRECATED as of version 5.0.
1085 1085
1086 1086 Instruct the completer to use __all__ for the completion
1087 1087
1088 1088 Specifically, when completing on ``object.<tab>``.
1089 1089
1090 1090 When True: only those names in obj.__all__ will be included.
1091 1091
1092 1092 When False [default]: the __all__ attribute is ignored
1093 1093 """,
1094 1094 ).tag(config=True)
1095 1095
1096 1096 profile_completions = Bool(
1097 1097 default_value=False,
1098 1098 help="If True, emit profiling data for completion subsystem using cProfile."
1099 1099 ).tag(config=True)
1100 1100
1101 1101 profiler_output_dir = Unicode(
1102 1102 default_value=".completion_profiles",
1103 1103 help="Template for path at which to output profile data for completions."
1104 1104 ).tag(config=True)
1105 1105
1106 1106 @observe('limit_to__all__')
1107 1107 def _limit_to_all_changed(self, change):
1108 1108 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
1109 1109 'value has been deprecated since IPython 5.0, will be made to have '
1110 1110 'no effect and then removed in a future version of IPython.',
1111 1111 UserWarning)
1112 1112
1113 1113 def __init__(self, shell=None, namespace=None, global_namespace=None,
1114 1114 use_readline=_deprecation_readline_sentinel, config=None, **kwargs):
1115 1115 """IPCompleter() -> completer
1116 1116
1117 1117 Return a completer object.
1118 1118
1119 1119 Parameters
1120 1120 ----------
1121 1121 shell
1122 1122 a pointer to the ipython shell itself. This is needed
1123 1123 because this completer knows about magic functions, and those can
1124 1124 only be accessed via the ipython instance.
1125 1125 namespace : dict, optional
1126 1126 an optional dict where completions are performed.
1127 1127 global_namespace : dict, optional
1128 1128 secondary optional dict for completions, to
1129 1129 handle cases (such as IPython embedded inside functions) where
1130 1130 both Python scopes are visible.
1131 1131 use_readline : bool, optional
1132 1132 DEPRECATED, ignored since IPython 6.0, will have no effects
1133 1133 """
1134 1134
1135 1135 self.magic_escape = ESC_MAGIC
1136 1136 self.splitter = CompletionSplitter()
1137 1137
1138 1138 if use_readline is not _deprecation_readline_sentinel:
1139 1139 warnings.warn('The `use_readline` parameter is deprecated and ignored since IPython 6.0.',
1140 1140 DeprecationWarning, stacklevel=2)
1141 1141
1142 1142 # _greedy_changed() depends on splitter and readline being defined:
1143 1143 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
1144 1144 config=config, **kwargs)
1145 1145
1146 1146 # List where completion matches will be stored
1147 1147 self.matches = []
1148 1148 self.shell = shell
1149 1149 # Regexp to split filenames with spaces in them
1150 1150 self.space_name_re = re.compile(r'([^\\] )')
1151 1151 # Hold a local ref. to glob.glob for speed
1152 1152 self.glob = glob.glob
1153 1153
1154 1154 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1155 1155 # buffers, to avoid completion problems.
1156 1156 term = os.environ.get('TERM','xterm')
1157 1157 self.dumb_terminal = term in ['dumb','emacs']
1158 1158
1159 1159 # Special handling of backslashes needed in win32 platforms
1160 1160 if sys.platform == "win32":
1161 1161 self.clean_glob = self._clean_glob_win32
1162 1162 else:
1163 1163 self.clean_glob = self._clean_glob
1164 1164
1165 1165 #regexp to parse docstring for function signature
1166 1166 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1167 1167 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1168 1168 #use this if positional argument name is also needed
1169 1169 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1170 1170
1171 1171 self.magic_arg_matchers = [
1172 1172 self.magic_config_matches,
1173 1173 self.magic_color_matches,
1174 1174 ]
1175 1175
1176 1176 # This is set externally by InteractiveShell
1177 1177 self.custom_completers = None
1178 1178
1179 1179 # This is a list of names of unicode characters that can be completed
1180 1180 # into their corresponding unicode value. The list is large, so we
1181 1181 # lazily initialize it on first use. Consuming code should access this
1182 1182 # attribute through the `unicode_names` property.
1183 1183 self._unicode_names = None
1184 1184
1185 1185 @property
1186 1186 def matchers(self) -> List[Any]:
1187 1187 """All active matcher routines for completion"""
1188 1188 if self.dict_keys_only:
1189 1189 return [self.dict_key_matches]
1190 1190
1191 1191 if self.use_jedi:
1192 1192 return [
1193 1193 *self.custom_matchers,
1194 1194 self.file_matches,
1195 1195 self.magic_matches,
1196 1196 self.dict_key_matches,
1197 1197 ]
1198 1198 else:
1199 1199 return [
1200 1200 *self.custom_matchers,
1201 1201 self.python_matches,
1202 1202 self.file_matches,
1203 1203 self.magic_matches,
1204 1204 self.python_func_kw_matches,
1205 1205 self.dict_key_matches,
1206 1206 ]
1207 1207
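`custom_matchers` is the lightest-weight extension point listed above: each entry is a callable taking the token being completed and returning a list of string matches. A hedged sketch of plugging one in from a live session (the matcher name and vocabulary are illustrative, and get_ipython() assumes a running IPython instance):

    def fruit_matcher(text):
        # Offer a fixed vocabulary whenever the current token matches it.
        candidates = ["fruit_basket", "fruit_salad"]
        return [c for c in candidates if c.startswith(text)]

    ip = get_ipython()                          # assumes an interactive session
    ip.Completer.custom_matchers.append(fruit_matcher)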
1208 1208 def all_completions(self, text:str) -> List[str]:
1209 1209 """
1210 1210 Wrapper around the completion methods for the benefit of emacs.
1211 1211 """
1212 1212 prefix = text.rpartition('.')[0]
1213 1213 with provisionalcompleter():
1214 1214 return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
1215 1215 for c in self.completions(text, len(text))]
1216 1216
1217 1217 return self.complete(text)[1]
1218 1218
1219 1219 def _clean_glob(self, text:str):
1220 1220 return self.glob("%s*" % text)
1221 1221
1222 1222 def _clean_glob_win32(self, text:str):
1223 1223 return [f.replace("\\","/")
1224 1224 for f in self.glob("%s*" % text)]
1225 1225
1226 1226 def file_matches(self, text:str)->List[str]:
1227 1227 """Match filenames, expanding ~USER type strings.
1228 1228
1229 1229 Most of the seemingly convoluted logic in this completer is an
1230 1230 attempt to handle filenames with spaces in them. And yet it's not
1231 1231 quite perfect, because Python's readline doesn't expose all of the
1232 1232 GNU readline details needed for this to be done correctly.
1233 1233
1234 1234 For a filename with a space in it, the printed completions will be
1235 1235 only the parts after what's already been typed (instead of the
1236 1236 full completions, as is normally done). I don't think with the
1237 1237 current (as of Python 2.3) Python readline it's possible to do
1238 1238 better."""
1239 1239
1240 1240 # chars that require escaping with backslash - i.e. chars
1241 1241 # that readline treats incorrectly as delimiters, but we
1242 1242 # don't want to treat as delimiters in filename matching
1243 1243 # when escaped with backslash
1244 1244 if text.startswith('!'):
1245 1245 text = text[1:]
1246 1246 text_prefix = u'!'
1247 1247 else:
1248 1248 text_prefix = u''
1249 1249
1250 1250 text_until_cursor = self.text_until_cursor
1251 1251 # track strings with open quotes
1252 1252 open_quotes = has_open_quotes(text_until_cursor)
1253 1253
1254 1254 if '(' in text_until_cursor or '[' in text_until_cursor:
1255 1255 lsplit = text
1256 1256 else:
1257 1257 try:
1258 1258 # arg_split ~ shlex.split, but with unicode bugs fixed by us
1259 1259 lsplit = arg_split(text_until_cursor)[-1]
1260 1260 except ValueError:
1261 1261 # typically an unmatched ", or backslash without escaped char.
1262 1262 if open_quotes:
1263 1263 lsplit = text_until_cursor.split(open_quotes)[-1]
1264 1264 else:
1265 1265 return []
1266 1266 except IndexError:
1267 1267 # tab pressed on empty line
1268 1268 lsplit = ""
1269 1269
1270 1270 if not open_quotes and lsplit != protect_filename(lsplit):
1271 1271 # if protectables are found, do matching on the whole escaped name
1272 1272 has_protectables = True
1273 1273 text0,text = text,lsplit
1274 1274 else:
1275 1275 has_protectables = False
1276 1276 text = os.path.expanduser(text)
1277 1277
1278 1278 if text == "":
1279 1279 return [text_prefix + protect_filename(f) for f in self.glob("*")]
1280 1280
1281 1281 # Compute the matches from the filesystem
1282 1282 if sys.platform == 'win32':
1283 1283 m0 = self.clean_glob(text)
1284 1284 else:
1285 1285 m0 = self.clean_glob(text.replace('\\', ''))
1286 1286
1287 1287 if has_protectables:
1288 1288 # If we had protectables, we need to revert our changes to the
1289 1289 # beginning of filename so that we don't double-write the part
1290 1290 # of the filename we have so far
1291 1291 len_lsplit = len(lsplit)
1292 1292 matches = [text_prefix + text0 +
1293 1293 protect_filename(f[len_lsplit:]) for f in m0]
1294 1294 else:
1295 1295 if open_quotes:
1296 1296 # if we have a string with an open quote, we don't need to
1297 1297 # protect the names beyond the quote (and we _shouldn't_, as
1298 1298 # it would cause bugs when the filesystem call is made).
1299 1299 matches = m0 if sys.platform == "win32" else\
1300 1300 [protect_filename(f, open_quotes) for f in m0]
1301 1301 else:
1302 1302 matches = [text_prefix +
1303 1303 protect_filename(f) for f in m0]
1304 1304
1305 1305 # Mark directories in input list by appending '/' to their names.
1306 1306 return [x+'/' if os.path.isdir(x) else x for x in matches]
1307 1307
1308 1308 def magic_matches(self, text:str):
1309 1309 """Match magics"""
1310 1310 # Get all shell magics now rather than statically, so magics loaded at
1311 1311 # runtime show up too.
1312 1312 lsm = self.shell.magics_manager.lsmagic()
1313 1313 line_magics = lsm['line']
1314 1314 cell_magics = lsm['cell']
1315 1315 pre = self.magic_escape
1316 1316 pre2 = pre+pre
1317 1317
1318 1318 explicit_magic = text.startswith(pre)
1319 1319
1320 1320 # Completion logic:
1321 1321 # - user gives %%: only do cell magics
1322 1322 # - user gives %: do both line and cell magics
1323 1323 # - no prefix: do both
1324 1324 # In other words, line magics are skipped if the user gives %% explicitly
1325 1325 #
1326 1326 # We also exclude magics that match any currently visible names:
1327 1327 # https://github.com/ipython/ipython/issues/4877, unless the user has
1328 1328 # typed a %:
1329 1329 # https://github.com/ipython/ipython/issues/10754
1330 1330 bare_text = text.lstrip(pre)
1331 1331 global_matches = self.global_matches(bare_text)
1332 1332 if not explicit_magic:
1333 1333 def matches(magic):
1334 1334 """
1335 1335 Filter magics, in particular remove magics that match
1336 1336 a name present in global namespace.
1337 1337 """
1338 1338 return ( magic.startswith(bare_text) and
1339 1339 magic not in global_matches )
1340 1340 else:
1341 1341 def matches(magic):
1342 1342 return magic.startswith(bare_text)
1343 1343
1344 1344 comp = [ pre2+m for m in cell_magics if matches(m)]
1345 1345 if not text.startswith(pre2):
1346 1346 comp += [ pre+m for m in line_magics if matches(m)]
1347 1347
1348 1348 return comp
1349 1349
1350 1350 def magic_config_matches(self, text:str) -> List[str]:
1351 1351 """ Match class names and attributes for %config magic """
1352 1352 texts = text.strip().split()
1353 1353
1354 1354 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
1355 1355 # get all configuration classes
1356 1356 classes = sorted(set([ c for c in self.shell.configurables
1357 1357 if c.__class__.class_traits(config=True)
1358 1358 ]), key=lambda x: x.__class__.__name__)
1359 1359 classnames = [ c.__class__.__name__ for c in classes ]
1360 1360
1361 1361 # return all classnames if config or %config is given
1362 1362 if len(texts) == 1:
1363 1363 return classnames
1364 1364
1365 1365 # match classname
1366 1366 classname_texts = texts[1].split('.')
1367 1367 classname = classname_texts[0]
1368 1368 classname_matches = [ c for c in classnames
1369 1369 if c.startswith(classname) ]
1370 1370
1371 1371 # return matched classes or the matched class with attributes
1372 1372 if texts[1].find('.') < 0:
1373 1373 return classname_matches
1374 1374 elif len(classname_matches) == 1 and \
1375 1375 classname_matches[0] == classname:
1376 1376 cls = classes[classnames.index(classname)].__class__
1377 1377 help = cls.class_get_help()
1378 1378 # strip leading '--' from cl-args:
1379 1379 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
1380 1380 return [ attr.split('=')[0]
1381 1381 for attr in help.strip().splitlines()
1382 1382 if attr.startswith(texts[1]) ]
1383 1383 return []
1384 1384
1385 1385 def magic_color_matches(self, text:str) -> List[str] :
1386 1386 """ Match color schemes for %colors magic"""
1387 1387 texts = text.split()
1388 1388 if text.endswith(' '):
1389 1389 # .split() strips off the trailing whitespace. Add '' back
1390 1390 # so that: '%colors ' -> ['%colors', '']
1391 1391 texts.append('')
1392 1392
1393 1393 if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
1394 1394 prefix = texts[1]
1395 1395 return [ color for color in InspectColors.keys()
1396 1396 if color.startswith(prefix) ]
1397 1397 return []
1398 1398
1399 1399 def _jedi_matches(self, cursor_column:int, cursor_line:int, text:str) -> Iterable[Any]:
1400 1400 """
1401 1401 Return a list of :any:`jedi.api.Completions` object from a ``text`` and
1402 1402 cursor position.
1403 1403
1404 1404 Parameters
1405 1405 ----------
1406 1406 cursor_column : int
1407 1407 column position of the cursor in ``text``, 0-indexed.
1408 1408 cursor_line : int
1409 1409 line position of the cursor in ``text``, 0-indexed
1410 1410 text : str
1411 1411 text to complete
1412 1412
1413 1413 Notes
1414 1414 -----
1415 1415 If ``IPCompleter.debug`` is ``True`` may return a :any:`_FakeJediCompletion`
1416 1416 object containing a string with the Jedi debug information attached.
1417 1417 """
1418 1418 namespaces = [self.namespace]
1419 1419 if self.global_namespace is not None:
1420 1420 namespaces.append(self.global_namespace)
1421 1421
1422 1422 completion_filter = lambda x:x
1423 1423 offset = cursor_to_position(text, cursor_line, cursor_column)
1424 1424 # filter output if we are completing for object members
1425 1425 if offset:
1426 1426 pre = text[offset-1]
1427 1427 if pre == '.':
1428 1428 if self.omit__names == 2:
1429 1429 completion_filter = lambda c:not c.name.startswith('_')
1430 1430 elif self.omit__names == 1:
1431 1431 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1432 1432 elif self.omit__names == 0:
1433 1433 completion_filter = lambda x:x
1434 1434 else:
1435 1435 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1436 1436
1437 1437 interpreter = jedi.Interpreter(text[:offset], namespaces)
1438 1438 try_jedi = True
1439 1439
1440 1440 try:
1441 1441 # find the first token in the current tree -- if it is a ' or " then we are in a string
1442 1442 completing_string = False
1443 1443 try:
1444 1444 first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
1445 1445 except StopIteration:
1446 1446 pass
1447 1447 else:
1448 1448 # note the value may be ', ", or it may also be ''' or """, or
1449 1449 # in some cases, """what/you/typed..., but all of these are
1450 1450 # strings.
1451 1451 completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
1452 1452
1453 1453 # if we are in a string jedi is likely not the right candidate for
1454 1454 # now. Skip it.
1455 1455 try_jedi = not completing_string
1456 1456 except Exception as e:
1457 1457 # many things can go wrong; we are using a private API, just don't crash.
1458 1458 if self.debug:
1459 1459 print("Error detecting if completing a non-finished string :", e, '|')
1460 1460
1461 1461 if not try_jedi:
1462 1462 return []
1463 1463 try:
1464 1464 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
1465 1465 except Exception as e:
1466 1466 if self.debug:
1467 1467 return [_FakeJediCompletion('Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\ns"""' % (e))]
1468 1468 else:
1469 1469 return []
1470 1470
1471 1471 def python_matches(self, text:str)->List[str]:
1472 1472 """Match attributes or global python names"""
1473 1473 if "." in text:
1474 1474 try:
1475 1475 matches = self.attr_matches(text)
1476 1476 if text.endswith('.') and self.omit__names:
1477 1477 if self.omit__names == 1:
1478 1478 # true if txt is _not_ a __ name, false otherwise:
1479 1479 no__name = (lambda txt:
1480 1480 re.match(r'.*\.__.*?__',txt) is None)
1481 1481 else:
1482 1482 # true if txt is _not_ a _ name, false otherwise:
1483 1483 no__name = (lambda txt:
1484 1484 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1485 1485 matches = filter(no__name, matches)
1486 1486 except NameError:
1487 1487 # catches <undefined attributes>.<tab>
1488 1488 matches = []
1489 1489 else:
1490 1490 matches = self.global_matches(text)
1491 1491 return matches
1492 1492
1493 1493 def _default_arguments_from_docstring(self, doc):
1494 1494 """Parse the first line of docstring for call signature.
1495 1495
1496 1496 Docstring should be of the form 'min(iterable[, key=func])\n'.
1497 1497 It can also parse cython docstring of the form
1498 1498 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
1499 1499 """
1500 1500 if doc is None:
1501 1501 return []
1502 1502
1503 1503 # care only about the first line
1504 1504 line = doc.lstrip().splitlines()[0]
1505 1505
1506 1506 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1507 1507 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
1508 1508 sig = self.docstring_sig_re.search(line)
1509 1509 if sig is None:
1510 1510 return []
1511 1511 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
1512 1512 sig = sig.groups()[0].split(',')
1513 1513 ret = []
1514 1514 for s in sig:
1515 1515 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1516 1516 ret += self.docstring_kwd_re.findall(s)
1517 1517 return ret
1518 1518
1519 1519 def _default_arguments(self, obj):
1520 1520 """Return the list of default arguments of obj if it is callable,
1521 1521 or empty list otherwise."""
1522 1522 call_obj = obj
1523 1523 ret = []
1524 1524 if inspect.isbuiltin(obj):
1525 1525 pass
1526 1526 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
1527 1527 if inspect.isclass(obj):
1528 1528 #for cython embedsignature=True the constructor docstring
1529 1529 #belongs to the object itself not __init__
1530 1530 ret += self._default_arguments_from_docstring(
1531 1531 getattr(obj, '__doc__', ''))
1532 1532 # for classes, check for __init__,__new__
1533 1533 call_obj = (getattr(obj, '__init__', None) or
1534 1534 getattr(obj, '__new__', None))
1535 1535 # for all others, check if they are __call__able
1536 1536 elif hasattr(obj, '__call__'):
1537 1537 call_obj = obj.__call__
1538 1538 ret += self._default_arguments_from_docstring(
1539 1539 getattr(call_obj, '__doc__', ''))
1540 1540
1541 1541 _keeps = (inspect.Parameter.KEYWORD_ONLY,
1542 1542 inspect.Parameter.POSITIONAL_OR_KEYWORD)
1543 1543
1544 1544 try:
1545 1545 sig = inspect.signature(call_obj)
1546 1546 ret.extend(k for k, v in sig.parameters.items() if
1547 1547 v.kind in _keeps)
1548 1548 except ValueError:
1549 1549 pass
1550 1550
1551 1551 return list(set(ret))
1552 1552
1553 1553 def python_func_kw_matches(self, text):
1554 1554 """Match named parameters (kwargs) of the last open function"""
1555 1555
1556 1556 if "." in text: # a parameter cannot be dotted
1557 1557 return []
1558 1558 try: regexp = self.__funcParamsRegex
1559 1559 except AttributeError:
1560 1560 regexp = self.__funcParamsRegex = re.compile(r'''
1561 1561 '.*?(?<!\\)' | # single quoted strings or
1562 1562 ".*?(?<!\\)" | # double quoted strings or
1563 1563 \w+ | # identifier
1564 1564 \S # other characters
1565 1565 ''', re.VERBOSE | re.DOTALL)
1566 1566 # 1. find the nearest identifier that comes before an unclosed
1567 1567 # parenthesis before the cursor
1568 1568 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
1569 1569 tokens = regexp.findall(self.text_until_cursor)
1570 1570 iterTokens = reversed(tokens); openPar = 0
1571 1571
1572 1572 for token in iterTokens:
1573 1573 if token == ')':
1574 1574 openPar -= 1
1575 1575 elif token == '(':
1576 1576 openPar += 1
1577 1577 if openPar > 0:
1578 1578 # found the last unclosed parenthesis
1579 1579 break
1580 1580 else:
1581 1581 return []
1582 1582 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
1583 1583 ids = []
1584 1584 isId = re.compile(r'\w+$').match
1585 1585
1586 1586 while True:
1587 1587 try:
1588 1588 ids.append(next(iterTokens))
1589 1589 if not isId(ids[-1]):
1590 1590 ids.pop(); break
1591 1591 if not next(iterTokens) == '.':
1592 1592 break
1593 1593 except StopIteration:
1594 1594 break
1595 1595
1596 1596 # Find all named arguments already assigned to, so as to avoid suggesting
1597 1597 # them again
1598 1598 usedNamedArgs = set()
1599 1599 par_level = -1
1600 1600 for token, next_token in zip(tokens, tokens[1:]):
1601 1601 if token == '(':
1602 1602 par_level += 1
1603 1603 elif token == ')':
1604 1604 par_level -= 1
1605 1605
1606 1606 if par_level != 0:
1607 1607 continue
1608 1608
1609 1609 if next_token != '=':
1610 1610 continue
1611 1611
1612 1612 usedNamedArgs.add(token)
1613 1613
1614 1614 argMatches = []
1615 1615 try:
1616 1616 callableObj = '.'.join(ids[::-1])
1617 1617 namedArgs = self._default_arguments(eval(callableObj,
1618 1618 self.namespace))
1619 1619
1620 1620 # Remove used named arguments from the list, no need to show twice
1621 1621 for namedArg in set(namedArgs) - usedNamedArgs:
1622 1622 if namedArg.startswith(text):
1623 1623 argMatches.append("%s=" %namedArg)
1624 1624 except:
1625 1625 pass
1626 1626
1627 1627 return argMatches
1628 1628
1629 1629 @staticmethod
1630 1630 def _get_keys(obj: Any) -> List[Any]:
1631 1631 # Objects can define their own completions by defining an
1632 1632 # _ipython_key_completions_() method.
1633 1633 method = get_real_method(obj, '_ipython_key_completions_')
1634 1634 if method is not None:
1635 1635 return method()
1636 1636
1637 1637 # Special case some common in-memory dict-like types
1638 1638 if isinstance(obj, dict) or\
1639 1639 _safe_isinstance(obj, 'pandas', 'DataFrame'):
1640 1640 try:
1641 1641 return list(obj.keys())
1642 1642 except Exception:
1643 1643 return []
1644 1644 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1645 1645 _safe_isinstance(obj, 'numpy', 'void'):
1646 1646 return obj.dtype.names or []
1647 1647 return []
1648 1648
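The `_ipython_key_completions_` hook checked above is the public way for user objects to offer their own `obj[<tab>` completions; a minimal sketch (the class and its keys are illustrative only):

    class Palette:
        # Dict-like object advertising its keys to IPython's completer.
        def __init__(self):
            self._colors = {"red": "#ff0000", "green": "#00ff00"}

        def __getitem__(self, key):
            return self._colors[key]

        def _ipython_key_completions_(self):
            # These keys are offered when completing   palette["<tab>
            return list(self._colors)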
1649 1649 def dict_key_matches(self, text:str) -> List[str]:
1650 1650 "Match string keys in a dictionary, after e.g. 'foo[' "
1651 1651
1652 1652
1653 1653 if self.__dict_key_regexps is not None:
1654 1654 regexps = self.__dict_key_regexps
1655 1655 else:
1656 1656 dict_key_re_fmt = r'''(?x)
1657 1657 ( # match dict-referring expression wrt greedy setting
1658 1658 %s
1659 1659 )
1660 1660 \[ # open bracket
1661 1661 \s* # and optional whitespace
1662 1662 # Capture any number of str-like objects (e.g. "a", "b", 'c')
1663 1663 ((?:[uUbB]? # string prefix (r not handled)
1664 1664 (?:
1665 1665 '(?:[^']|(?<!\\)\\')*'
1666 1666 |
1667 1667 "(?:[^"]|(?<!\\)\\")*"
1668 1668 )
1669 1669 \s*,\s*
1670 1670 )*)
1671 1671 ([uUbB]? # string prefix (r not handled)
1672 1672 (?: # unclosed string
1673 1673 '(?:[^']|(?<!\\)\\')*
1674 1674 |
1675 1675 "(?:[^"]|(?<!\\)\\")*
1676 1676 )
1677 1677 )?
1678 1678 $
1679 1679 '''
1680 1680 regexps = self.__dict_key_regexps = {
1681 1681 False: re.compile(dict_key_re_fmt % r'''
1682 1682 # identifiers separated by .
1683 1683 (?!\d)\w+
1684 1684 (?:\.(?!\d)\w+)*
1685 1685 '''),
1686 1686 True: re.compile(dict_key_re_fmt % '''
1687 1687 .+
1688 1688 ''')
1689 1689 }
1690 1690
1691 1691 match = regexps[self.greedy].search(self.text_until_cursor)
1692 1692
1693 1693 if match is None:
1694 1694 return []
1695 1695
1696 1696 expr, prefix0, prefix = match.groups()
1697 1697 try:
1698 1698 obj = eval(expr, self.namespace)
1699 1699 except Exception:
1700 1700 try:
1701 1701 obj = eval(expr, self.global_namespace)
1702 1702 except Exception:
1703 1703 return []
1704 1704
1705 1705 keys = self._get_keys(obj)
1706 1706 if not keys:
1707 1707 return keys
1708 1708
1709 1709 extra_prefix = eval(prefix0) if prefix0 != '' else None
1710 1710
1711 1711 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims, extra_prefix=extra_prefix)
1712 1712 if not matches:
1713 1713 return matches
1714 1714
1715 1715 # get the cursor position of
1716 1716 # - the text being completed
1717 1717 # - the start of the key text
1718 1718 # - the start of the completion
1719 1719 text_start = len(self.text_until_cursor) - len(text)
1720 1720 if prefix:
1721 1721 key_start = match.start(3)
1722 1722 completion_start = key_start + token_offset
1723 1723 else:
1724 1724 key_start = completion_start = match.end()
1725 1725
1726 1726 # grab the leading prefix, to make sure all completions start with `text`
1727 1727 if text_start > key_start:
1728 1728 leading = ''
1729 1729 else:
1730 1730 leading = text[text_start:completion_start]
1731 1731
1732 1732 # the index of the `[` character
1733 1733 bracket_idx = match.end(1)
1734 1734
1735 1735 # append closing quote and bracket as appropriate
1736 1736 # this is *not* appropriate if the opening quote or bracket is outside
1737 1737 # the text given to this method
1738 1738 suf = ''
1739 1739 continuation = self.line_buffer[len(self.text_until_cursor):]
1740 1740 if key_start > text_start and closing_quote:
1741 1741 # quotes were opened inside text, maybe close them
1742 1742 if continuation.startswith(closing_quote):
1743 1743 continuation = continuation[len(closing_quote):]
1744 1744 else:
1745 1745 suf += closing_quote
1746 1746 if bracket_idx > text_start:
1747 1747 # brackets were opened inside text, maybe close them
1748 1748 if not continuation.startswith(']'):
1749 1749 suf += ']'
1750 1750
1751 1751 return [leading + k + suf for k in matches]
1752 1752
1753 1753 @staticmethod
1754 1754 def unicode_name_matches(text:str) -> Tuple[str, List[str]] :
1755 1755 """Match Latex-like syntax for unicode characters base
1756 1756 on the name of the character.
1757 1757
1758 1758 This does ``\\GREEK SMALL LETTER ETA`` -> ``η``
1759 1759
1760 1760 Works only on valid python 3 identifiers, or on combining characters that
1761 1761 will combine to form a valid identifier.
1762 1762 """
1763 1763 slashpos = text.rfind('\\')
1764 1764 if slashpos > -1:
1765 1765 s = text[slashpos+1:]
1766 1766 try :
1767 1767 unic = unicodedata.lookup(s)
1768 1768 # allow combining chars
1769 1769 if ('a'+unic).isidentifier():
1770 1770 return '\\'+s,[unic]
1771 1771 except KeyError:
1772 1772 pass
1773 1773 return '', []
1774 1774
1775 1775
1776 1776 def latex_matches(self, text:str) -> Tuple[str, Sequence[str]]:
1777 1777 """Match Latex syntax for unicode characters.
1778 1778
1779 1779 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α``
1780 1780 """
1781 1781 slashpos = text.rfind('\\')
1782 1782 if slashpos > -1:
1783 1783 s = text[slashpos:]
1784 1784 if s in latex_symbols:
1785 1785 # Try to complete a full latex symbol to unicode
1786 1786 # \\alpha -> α
1787 1787 return s, [latex_symbols[s]]
1788 1788 else:
1789 1789 # If a user has partially typed a latex symbol, give them
1790 1790 # a full list of options \al -> [\aleph, \alpha]
1791 1791 matches = [k for k in latex_symbols if k.startswith(s)]
1792 1792 if matches:
1793 1793 return s, matches
1794 1794 return '', ()
1795 1795
1796 1796 def dispatch_custom_completer(self, text):
1797 1797 if not self.custom_completers:
1798 1798 return
1799 1799
1800 1800 line = self.line_buffer
1801 1801 if not line.strip():
1802 1802 return None
1803 1803
1804 1804 # Create a little structure to pass all the relevant information about
1805 1805 # the current completion to any custom completer.
1806 1806 event = SimpleNamespace()
1807 1807 event.line = line
1808 1808 event.symbol = text
1809 1809 cmd = line.split(None,1)[0]
1810 1810 event.command = cmd
1811 1811 event.text_until_cursor = self.text_until_cursor
1812 1812
1813 1813 # for foo etc, try also to find completer for %foo
1814 1814 if not cmd.startswith(self.magic_escape):
1815 1815 try_magic = self.custom_completers.s_matches(
1816 1816 self.magic_escape + cmd)
1817 1817 else:
1818 1818 try_magic = []
1819 1819
1820 1820 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1821 1821 try_magic,
1822 1822 self.custom_completers.flat_matches(self.text_until_cursor)):
1823 1823 try:
1824 1824 res = c(event)
1825 1825 if res:
1826 1826 # first, try case sensitive match
1827 1827 withcase = [r for r in res if r.startswith(text)]
1828 1828 if withcase:
1829 1829 return withcase
1830 1830 # if none, then case insensitive ones are ok too
1831 1831 text_low = text.lower()
1832 1832 return [r for r in res if r.lower().startswith(text_low)]
1833 1833 except TryNext:
1834 1834 pass
1835 1835 except KeyboardInterrupt:
1836 1836 """
1837 1837 If custom completer take too long,
1838 1838 let keyboard interrupt abort and return nothing.
1839 1839 """
1840 1840 break
1841 1841
1842 1842 return None
1843 1843
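Custom command completers reach the dispatcher above through IPython's hook system; a hedged sketch of registering one for a hypothetical %deploy magic (the magic name and the returned targets are illustrative, and get_ipython() assumes a running IPython instance):

    def deploy_completer(self, event):
        # event.symbol is the token being completed, event.line the whole line.
        return ["staging", "production"]

    ip = get_ipython()                                    # interactive session
    ip.set_hook("complete_command", deploy_completer, str_key="%deploy")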
1844 1844 def completions(self, text: str, offset: int)->Iterator[Completion]:
1845 1845 """
1846 1846 Returns an iterator over the possible completions
1847 1847
1848 1848 .. warning::
1849 1849
1850 1850 Unstable
1851 1851
1852 1852 This function is unstable, API may change without warning.
1853 1853 It will also raise unless used in a proper context manager.
1854 1854
1855 1855 Parameters
1856 1856 ----------
1857 text:str
1857 text : str
1858 1858 Full text of the current input, multi line string.
1859 offset:int
1859 offset : int
1860 1860 Integer representing the position of the cursor in ``text``. Offset
1861 1861 is 0-based indexed.
1862 1862
1863 1863 Yields
1864 1864 ------
1865 1865 Completion
1866 1866
1867 1867 Notes
1868 1868 -----
1869 1869 The cursor in a text can either be seen as being "in between"
1870 1870 characters or "on" a character, depending on the interface visible to
1871 1871 the user. For consistency, the cursor being "in between" characters X
1872 1872 and Y is equivalent to the cursor being "on" character Y, that is to say
1873 1873 the character the cursor is on is considered as being after the cursor.
1874 1874
1875 1875 Combining characters may span more than one position in the
1876 1876 text.
1877 1877
1878 1878 .. note::
1879 1879
1880 1880 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
1881 1881 fake Completion token to distinguish completions returned by Jedi
1882 1882 from the usual IPython completions.
1883 1883
1884 1884 .. note::
1885 1885
1886 1886 Completions are not completely deduplicated yet. If identical
1887 1887 completions are coming from different sources this function does not
1888 1888 ensure that each completion object will only be present once.
1889 1889 """
1890 1890 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
1891 1891 "It may change without warnings. "
1892 1892 "Use in corresponding context manager.",
1893 1893 category=ProvisionalCompleterWarning, stacklevel=2)
1894 1894
1895 1895 seen = set()
1896 1896 profiler:Optional[cProfile.Profile]
1897 1897 try:
1898 1898 if self.profile_completions:
1899 1899 import cProfile
1900 1900 profiler = cProfile.Profile()
1901 1901 profiler.enable()
1902 1902 else:
1903 1903 profiler = None
1904 1904
1905 1905 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
1906 1906 if c and (c in seen):
1907 1907 continue
1908 1908 yield c
1909 1909 seen.add(c)
1910 1910 except KeyboardInterrupt:
1911 1911 """if completions take too long and users send keyboard interrupt,
1912 1912 do not crash and return ASAP. """
1913 1913 pass
1914 1914 finally:
1915 1915 if profiler is not None:
1916 1916 profiler.disable()
1917 1917 ensure_dir_exists(self.profiler_output_dir)
1918 1918 output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
1919 1919 print("Writing profiler output to", output_path)
1920 1920 profiler.dump_stats(output_path)
1921 1921
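A usage sketch of the provisional API above, assuming an interactive shell;
as the warning says, it must be wrapped in the ``provisionalcompleter()``
context manager::

    from IPython import get_ipython
    from IPython.core.completer import provisionalcompleter

    ip = get_ipython()
    code = "import os\nos.pa"
    with provisionalcompleter():
        for comp in ip.Completer.completions(code, len(code)):
            print(comp.text, comp.type)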
1922 1922 def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
1923 1923 """
1924 1924 Core completion module. Same signature as :any:`completions`, with the
1925 1925 extra ``_timeout`` parameter (in seconds).
1926 1926
1927 1927 Computing jedi's completion ``.type`` can be quite expensive (it is a
1928 1928 lazy property) and can require some warm-up, more warm-up than just
1929 1929 computing the ``name`` of a completion. The warm-up can be:
1930 1930
1931 1931 - Long warm-up the first time a module is encountered after
1932 1932 install/update: actually build parse/inference tree.
1933 1933
1934 1934 - first time the module is encountered in a session: load tree from
1935 1935 disk.
1936 1936
1937 1937 We don't want to block completions for tens of seconds, so we give the
1938 1938 completer a "budget" of ``_timeout`` seconds per invocation to compute
1939 1939 completion types; the completions that have not yet been computed will
1940 1940 be marked as "unknown" and will have a chance to be computed in the next
1941 1941 round, as things get cached.
1942 1942
1943 1943 Keep in mind that Jedi is not the only thing processing the completion, so
1944 1944 keep the timeout short-ish; if we take more than 0.3 seconds we still
1945 1945 have lots of processing to do.
1946 1946
1947 1947 """
1948 1948 deadline = time.monotonic() + _timeout
1949 1949
1950 1950
1951 1951 before = full_text[:offset]
1952 1952 cursor_line, cursor_column = position_to_cursor(full_text, offset)
1953 1953
1954 1954 matched_text, matches, matches_origin, jedi_matches = self._complete(
1955 1955 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
1956 1956
1957 1957 iter_jm = iter(jedi_matches)
1958 1958 if _timeout:
1959 1959 for jm in iter_jm:
1960 1960 try:
1961 1961 type_ = jm.type
1962 1962 except Exception:
1963 1963 if self.debug:
1964 1964 print("Error in Jedi getting type of ", jm)
1965 1965 type_ = None
1966 1966 delta = len(jm.name_with_symbols) - len(jm.complete)
1967 1967 if type_ == 'function':
1968 1968 signature = _make_signature(jm)
1969 1969 else:
1970 1970 signature = ''
1971 1971 yield Completion(start=offset - delta,
1972 1972 end=offset,
1973 1973 text=jm.name_with_symbols,
1974 1974 type=type_,
1975 1975 signature=signature,
1976 1976 _origin='jedi')
1977 1977
1978 1978 if time.monotonic() > deadline:
1979 1979 break
1980 1980
1981 1981 for jm in iter_jm:
1982 1982 delta = len(jm.name_with_symbols) - len(jm.complete)
1983 1983 yield Completion(start=offset - delta,
1984 1984 end=offset,
1985 1985 text=jm.name_with_symbols,
1986 1986 type='<unknown>', # don't compute type for speed
1987 1987 _origin='jedi',
1988 1988 signature='')
1989 1989
1990 1990
1991 1991 start_offset = before.rfind(matched_text)
1992 1992
1993 1993 # TODO:
1994 1994 # Suppress this, right now just for debug.
1995 1995 if jedi_matches and matches and self.debug:
1996 1996 yield Completion(start=start_offset, end=offset, text='--jedi/ipython--',
1997 1997 _origin='debug', type='none', signature='')
1998 1998
1999 1999 # I'm unsure if this is always true, so let's assert and see if it
2000 2000 # crashes
2001 2001 assert before.endswith(matched_text)
2002 2002 for m, t in zip(matches, matches_origin):
2003 2003 yield Completion(start=start_offset, end=offset, text=m, _origin=t, signature='', type='<unknown>')
2004 2004
2005 2005
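The deadline handling above is a generic "time budget" pattern: spend the
budget on the expensive per-item work, then fall back to a cheap placeholder
for whatever is left. A standalone sketch of the same idea (not IPython code)::

    import time

    def budgeted(items, expensive, cheap, budget_s=0.3):
        deadline = time.monotonic() + budget_s
        it = iter(items)
        for item in it:
            yield expensive(item)          # full computation while in budget
            if time.monotonic() > deadline:
                break
        for item in it:
            yield cheap(item)              # cheap fallback once over budget

    # e.g. list(budgeted(range(5), lambda x: x * x, lambda x: None, budget_s=0.001))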
2006 2006 def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
2007 2007 """Find completions for the given text and line context.
2008 2008
2009 2009 Note that both the text and the line_buffer are optional, but at least
2010 2010 one of them must be given.
2011 2011
2012 2012 Parameters
2013 2013 ----------
2014 2014 text : string, optional
2015 2015 Text to perform the completion on. If not given, the line buffer
2016 2016 is split using the instance's CompletionSplitter object.
2017 2017 line_buffer : string, optional
2018 2018 If not given, the completer attempts to obtain the current line
2019 2019 buffer via readline. This keyword allows clients which are
2020 2020 requesting for text completions in non-readline contexts to inform
2021 2021 the completer of the entire text.
2022 2022 cursor_pos : int, optional
2023 2023 Index of the cursor in the full line buffer. Should be provided by
2024 2024 remote frontends where kernel has no access to frontend state.
2025 2025
2026 2026 Returns
2027 2027 -------
2028 2028 Tuple of two items:
2029 2029 text : str
2030 2030 Text that was actually used in the completion.
2031 2031 matches : list
2032 2032 A list of completion matches.
2033 2033
2034 2034 Notes
2035 2035 -----
2036 2036 This API is likely to be deprecated and replaced by
2037 2037 :any:`IPCompleter.completions` in the future.
2038 2038
2039 2039 """
2040 2040 warnings.warn('`Completer.complete` is pending deprecation since '
2041 2041 'IPython 6.0 and will be replaced by `Completer.completions`.',
2042 2042 PendingDeprecationWarning)
2043 2043 # potential todo: fold the 3rd throwaway argument of _complete
2044 2044 # into the first 2.
2045 2045 return self._complete(line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0)[:2]
2046 2046
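A usage sketch of this legacy API, assuming an interactive shell (the matches
depend on the namespace, and a ``PendingDeprecationWarning`` is expected)::

    from IPython import get_ipython

    ip = get_ipython()
    matched, matches = ip.Completer.complete(line_buffer="import o", cursor_pos=8)
    print(matched, matches[:3])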
2047 2047 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
2048 2048 full_text=None) -> _CompleteResult:
2049 2049 """
2050 2050 Like complete but can also return raw jedi completions as well as the
2051 2051 origin of the completion text. This could (and should) be made much
2052 2052 cleaner but that will be simpler once we drop the old (and stateful)
2053 2053 :any:`complete` API.
2054 2054
2055 2055 With the current provisional API, ``cursor_pos`` acts (depending on the
2056 2056 caller) either as the offset in ``text`` or ``line_buffer``, or as the
2057 2057 ``column`` when passing multiline strings; this could/should be renamed,
2058 2058 but that would add extra noise.
2059 2059
2060 2060 Returns
2061 2061 -------
2062 2062 A tuple of N elements which are (likely):
2063 2063 matched_text: ? the text that the complete matched
2064 2064 matches: list of completions ?
2065 2065 matches_origin: ? list of the same length as matches, saying where each completion came from
2066 2066 jedi_matches: list of Jedi matches, which has its own structure.
2067 2067 """
2068 2068
2069 2069
2070 2070 # if the cursor position isn't given, the only sane assumption we can
2071 2071 # make is that it's at the end of the line (the common case)
2072 2072 if cursor_pos is None:
2073 2073 cursor_pos = len(line_buffer) if text is None else len(text)
2074 2074
2075 2075 if self.use_main_ns:
2076 2076 self.namespace = __main__.__dict__
2077 2077
2078 2078 # if text is either None or an empty string, rely on the line buffer
2079 2079 if (not line_buffer) and full_text:
2080 2080 line_buffer = full_text.split('\n')[cursor_line]
2081 2081 if not text: # issue #11508: check line_buffer before calling split_line
2082 2082 text = self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ''
2083 2083
2084 2084 if self.backslash_combining_completions:
2085 2085 # allow deactivation of these on windows.
2086 2086 base_text = text if not line_buffer else line_buffer[:cursor_pos]
2087 2087
2088 2088 for meth in (self.latex_matches,
2089 2089 self.unicode_name_matches,
2090 2090 back_latex_name_matches,
2091 2091 back_unicode_name_matches,
2092 2092 self.fwd_unicode_match):
2093 2093 name_text, name_matches = meth(base_text)
2094 2094 if name_text:
2095 2095 return _CompleteResult(name_text, name_matches[:MATCHES_LIMIT], \
2096 2096 [meth.__qualname__]*min(len(name_matches), MATCHES_LIMIT), ())
2097 2097
2098 2098
2099 2099 # If no line buffer is given, assume the input text is all there was
2100 2100 if line_buffer is None:
2101 2101 line_buffer = text
2102 2102
2103 2103 self.line_buffer = line_buffer
2104 2104 self.text_until_cursor = self.line_buffer[:cursor_pos]
2105 2105
2106 2106 # Do magic arg matches
2107 2107 for matcher in self.magic_arg_matchers:
2108 2108 matches = list(matcher(line_buffer))[:MATCHES_LIMIT]
2109 2109 if matches:
2110 2110 origins = [matcher.__qualname__] * len(matches)
2111 2111 return _CompleteResult(text, matches, origins, ())
2112 2112
2113 2113 # Start with a clean slate of completions
2114 2114 matches = []
2115 2115
2116 2116 # FIXME: we should extend our api to return a dict with completions for
2117 2117 # different types of objects. The rlcomplete() method could then
2118 2118 # simply collapse the dict into a list for readline, but we'd have
2119 2119 # richer completion semantics in other environments.
2120 2120 completions:Iterable[Any] = []
2121 2121 if self.use_jedi:
2122 2122 if not full_text:
2123 2123 full_text = line_buffer
2124 2124 completions = self._jedi_matches(
2125 2125 cursor_pos, cursor_line, full_text)
2126 2126
2127 2127 if self.merge_completions:
2128 2128 matches = []
2129 2129 for matcher in self.matchers:
2130 2130 try:
2131 2131 matches.extend([(m, matcher.__qualname__)
2132 2132 for m in matcher(text)])
2133 2133 except:
2134 2134 # Show the ugly traceback if the matcher causes an
2135 2135 # exception, but do NOT crash the kernel!
2136 2136 sys.excepthook(*sys.exc_info())
2137 2137 else:
2138 2138 for matcher in self.matchers:
2139 2139 matches = [(m, matcher.__qualname__)
2140 2140 for m in matcher(text)]
2141 2141 if matches:
2142 2142 break
2143 2143
2144 2144 seen = set()
2145 2145 filtered_matches = set()
2146 2146 for m in matches:
2147 2147 t, c = m
2148 2148 if t not in seen:
2149 2149 filtered_matches.add(m)
2150 2150 seen.add(t)
2151 2151
2152 2152 _filtered_matches = sorted(filtered_matches, key=lambda x: completions_sorting_key(x[0]))
2153 2153
2154 2154 custom_res = [(m, 'custom') for m in self.dispatch_custom_completer(text) or []]
2155 2155
2156 2156 _filtered_matches = custom_res or _filtered_matches
2157 2157
2158 2158 _filtered_matches = _filtered_matches[:MATCHES_LIMIT]
2159 2159 _matches = [m[0] for m in _filtered_matches]
2160 2160 origins = [m[1] for m in _filtered_matches]
2161 2161
2162 2162 self.matches = _matches
2163 2163
2164 2164 return _CompleteResult(text, _matches, origins, completions)
2165 2165
2166 2166 def fwd_unicode_match(self, text:str) -> Tuple[str, Sequence[str]]:
2167 2167 """
2168 2168 Forward-match a string starting with a backslash against the list of
2169 2169 potential Unicode completions.
2170 2170
2171 2171 Will compute the list of Unicode character names on first call and cache it.
2172 2172
2173 2173 Returns
2174 2174 -------
2175 2175 A tuple with:
2176 2176 - matched text (empty if no matches)
2177 2177 - list of potential completions (empty tuple if no matches)
2178 2178 """
2179 2179 # TODO: self.unicode_names is here a list we traverse each time with ~100k elements.
2180 2180 # We could do a faster match using a Trie.
2181 2181
2182 2182 # Using pygtrie the following seems to work:
2183 2183
2184 2184 # s = PrefixSet()
2185 2185
2186 2186 # for c in range(0,0x10FFFF + 1):
2187 2187 # try:
2188 2188 # s.add(unicodedata.name(chr(c)))
2189 2189 # except ValueError:
2190 2190 # pass
2191 2191 # [''.join(k) for k in s.iter(prefix)]
2192 2192
2193 2193 # But this needs to be timed and adds an extra dependency.
2194 2194
2195 2195 slashpos = text.rfind('\\')
2196 2196 # if text starts with slash
2197 2197 if slashpos > -1:
2198 2198 # PERF: It's important that we don't access self._unicode_names
2199 2199 # until we're inside this if-block. _unicode_names is lazily
2200 2200 # initialized, and it takes a user-noticeable amount of time to
2201 2201 # initialize it, so we don't want to initialize it unless we're
2202 2202 # actually going to use it.
2203 2203 s = text[slashpos+1:]
2204 2204 candidates = [x for x in self.unicode_names if x.startswith(s)]
2205 2205 if candidates:
2206 2206 return s, candidates
2207 2207 else:
2208 2208 return '', ()
2209 2209
2210 2210 # if text does not start with slash
2211 2211 else:
2212 2212 return '', ()
2213 2213
2214 2214 @property
2215 2215 def unicode_names(self) -> List[str]:
2216 2216 """List of names of unicode code points that can be completed.
2217 2217
2218 2218 The list is lazily initialized on first access.
2219 2219 """
2220 2220 if self._unicode_names is None:
2221 2221 names = []
2222 2222 for c in range(0,0x10FFFF + 1):
2223 2223 try:
2224 2224 names.append(unicodedata.name(chr(c)))
2225 2225 except ValueError:
2226 2226 pass
2227 2227 self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
2228 2228
2229 2229 return self._unicode_names
2230 2230
2231 2231 def _unicode_name_compute(ranges:List[Tuple[int,int]]) -> List[str]:
2232 2232 names = []
2233 2233 for start,stop in ranges:
2234 2234 for c in range(start, stop) :
2235 2235 try:
2236 2236 names.append(unicodedata.name(chr(c)))
2237 2237 except ValueError:
2238 2238 pass
2239 2239 return names
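The helper above boils down to the following computation over code-point
ranges; a self-contained sketch restricted to ASCII capitals for illustration::

    import unicodedata

    def names_in_range(start, stop):
        names = []
        for c in range(start, stop):
            try:
                names.append(unicodedata.name(chr(c)))
            except ValueError:  # unassigned code points have no name
                pass
        return names

    print(names_in_range(0x41, 0x44))
    # ['LATIN CAPITAL LETTER A', 'LATIN CAPITAL LETTER B', 'LATIN CAPITAL LETTER C']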
@@ -1,354 +1,370 b''
1 1 # encoding: utf-8
2 2 """Implementations for various useful completers.
3 3
4 4 These are all loaded by default by IPython.
5 5 """
6 6 #-----------------------------------------------------------------------------
7 7 # Copyright (C) 2010-2011 The IPython Development Team.
8 8 #
9 9 # Distributed under the terms of the BSD License.
10 10 #
11 11 # The full license is in the file COPYING.txt, distributed with this software.
12 12 #-----------------------------------------------------------------------------
13 13
14 14 #-----------------------------------------------------------------------------
15 15 # Imports
16 16 #-----------------------------------------------------------------------------
17 17
18 18 # Stdlib imports
19 19 import glob
20 20 import inspect
21 21 import os
22 22 import re
23 23 import sys
24 24 from importlib import import_module
25 25 from importlib.machinery import all_suffixes
26 26
27 27
28 28 # Third-party imports
29 29 from time import time
30 30 from zipimport import zipimporter
31 31
32 32 # Our own imports
33 33 from .completer import expand_user, compress_user
34 34 from .error import TryNext
35 35 from ..utils._process_common import arg_split
36 36
37 37 # FIXME: this should be pulled in with the right call via the component system
38 38 from IPython import get_ipython
39 39
40 40 from typing import List
41 41
42 42 #-----------------------------------------------------------------------------
43 43 # Globals and constants
44 44 #-----------------------------------------------------------------------------
45 45 _suffixes = all_suffixes()
46 46
47 47 # Time in seconds after which the rootmodules will be stored permanently in the
48 48 # ipython ip.db database (kept in the user's .ipython dir).
49 49 TIMEOUT_STORAGE = 2
50 50
51 51 # Time in seconds after which we give up
52 52 TIMEOUT_GIVEUP = 20
53 53
54 54 # Regular expression for the python import statement
55 55 import_re = re.compile(r'(?P<name>[^\W\d]\w*?)'
56 56 r'(?P<package>[/\\]__init__)?'
57 57 r'(?P<suffix>%s)$' %
58 58 r'|'.join(re.escape(s) for s in _suffixes))
59 59
60 60 # RE for the ipython %run command (python + ipython scripts)
61 61 magic_run_re = re.compile(r'.*(\.ipy|\.ipynb|\.py[w]?)$')
62 62
63 63 #-----------------------------------------------------------------------------
64 64 # Local utilities
65 65 #-----------------------------------------------------------------------------
66 66
67 67 def module_list(path):
68 68 """
69 69 Return the list containing the names of the modules available in the given
70 70 folder.
71 71 """
72 72 # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
73 73 if path == '':
74 74 path = '.'
75 75
76 76 # A few local constants to be used in loops below
77 77 pjoin = os.path.join
78 78
79 79 if os.path.isdir(path):
80 80 # Build a list of all files in the directory and all files
81 81 # in its subdirectories. For performance reasons, do not
82 82 # recurse more than one level into subdirectories.
83 83 files = []
84 84 for root, dirs, nondirs in os.walk(path, followlinks=True):
85 85 subdir = root[len(path)+1:]
86 86 if subdir:
87 87 files.extend(pjoin(subdir, f) for f in nondirs)
88 88 dirs[:] = [] # Do not recurse into additional subdirectories.
89 89 else:
90 90 files.extend(nondirs)
91 91
92 92 else:
93 93 try:
94 94 files = list(zipimporter(path)._files.keys())
95 95 except:
96 96 files = []
97 97
98 98 # Build a list of modules which match the import_re regex.
99 99 modules = []
100 100 for f in files:
101 101 m = import_re.match(f)
102 102 if m:
103 103 modules.append(m.group('name'))
104 104 return list(set(modules))
105 105
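A quick usage sketch (output depends entirely on the directory contents)::

    from IPython.core.completerlib import module_list
    print(sorted(module_list("."))[:5])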
106 106
107 107 def get_root_modules():
108 108 """
109 109 Returns a list containing the names of all the modules available in the
110 110 folders of the pythonpath.
111 111
112 112 ip.db['rootmodules_cache'] maps sys.path entries to list of modules.
113 113 """
114 114 ip = get_ipython()
115 115 if ip is None:
116 116 # No global shell instance to store cached list of modules.
117 117 # Don't try to scan for modules every time.
118 118 return list(sys.builtin_module_names)
119 119
120 120 rootmodules_cache = ip.db.get('rootmodules_cache', {})
121 121 rootmodules = list(sys.builtin_module_names)
122 122 start_time = time()
123 123 store = False
124 124 for path in sys.path:
125 125 try:
126 126 modules = rootmodules_cache[path]
127 127 except KeyError:
128 128 modules = module_list(path)
129 129 try:
130 130 modules.remove('__init__')
131 131 except ValueError:
132 132 pass
133 133 if path not in ('', '.'): # cwd modules should not be cached
134 134 rootmodules_cache[path] = modules
135 135 if time() - start_time > TIMEOUT_STORAGE and not store:
136 136 store = True
137 137 print("\nCaching the list of root modules, please wait!")
138 138 print("(This will only be done once - type '%rehashx' to "
139 139 "reset cache!)\n")
140 140 sys.stdout.flush()
141 141 if time() - start_time > TIMEOUT_GIVEUP:
142 142 print("This is taking too long, we give up.\n")
143 143 return []
144 144 rootmodules.extend(modules)
145 145 if store:
146 146 ip.db['rootmodules_cache'] = rootmodules_cache
147 147 rootmodules = list(set(rootmodules))
148 148 return rootmodules
149 149
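Called directly it looks like this; outside of IPython it simply falls back to
the builtin module names instead of scanning and caching ``sys.path``::

    from IPython.core.completerlib import get_root_modules
    mods = get_root_modules()
    print("sys" in mods, len(mods))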
150 150
151 151 def is_importable(module, attr, only_modules):
152 152 if only_modules:
153 153 return inspect.ismodule(getattr(module, attr))
154 154 else:
155 155 return not(attr[:2] == '__' and attr[-2:] == '__')
156 156
157 def is_possible_submodule(module, attr):
158 try:
159 obj = getattr(module, attr)
160 except AttributeError:
161 # Is possibly an unimported submodule
162 return True
163 except TypeError:
164 # https://github.com/ipython/ipython/issues/9678
165 return False
166 return inspect.ismodule(obj)
167
157 168
158 169 def try_import(mod: str, only_modules=False) -> List[str]:
159 170 """
160 171 Try to import given module and return list of potential completions.
161 172 """
162 173 mod = mod.rstrip('.')
163 174 try:
164 175 m = import_module(mod)
165 176 except:
166 177 return []
167 178
168 179 m_is_init = '__init__' in (getattr(m, '__file__', '') or '')
169 180
170 181 completions = []
171 182 if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
172 183 completions.extend( [attr for attr in dir(m) if
173 184 is_importable(m, attr, only_modules)])
174 185
175 completions.extend(getattr(m, '__all__', []))
186 m_all = getattr(m, "__all__", [])
187 if only_modules:
188 completions.extend(attr for attr in m_all if is_possible_submodule(m, attr))
189 else:
190 completions.extend(m_all)
191
176 192 if m_is_init:
177 193 completions.extend(module_list(os.path.dirname(m.__file__)))
178 194 completions_set = {c for c in completions if isinstance(c, str)}
179 195 completions_set.discard('__init__')
180 196 return list(completions_set)
181 197
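For example, completing names under a package could look like this (actual
results depend on the installed Python)::

    from IPython.core.completerlib import try_import
    print(sorted(try_import("json", only_modules=True)))  # e.g. ['decoder', 'encoder', ...]
    print("loads" in try_import("json"))                  # True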
182 198
183 199 #-----------------------------------------------------------------------------
184 200 # Completion-related functions.
185 201 #-----------------------------------------------------------------------------
186 202
187 203 def quick_completer(cmd, completions):
188 204 r""" Easily create a trivial completer for a command.
189 205
190 206 Takes either a list of completions, or all completions as a string (which will
191 207 be split on whitespace).
192 208
193 209 Example::
194 210
195 211 [d:\ipython]|1> import ipy_completers
196 212 [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
197 213 [d:\ipython]|3> foo b<TAB>
198 214 bar baz
199 215 [d:\ipython]|3> foo ba
200 216 """
201 217
202 218 if isinstance(completions, str):
203 219 completions = completions.split()
204 220
205 221 def do_complete(self, event):
206 222 return completions
207 223
208 224 get_ipython().set_hook('complete_command',do_complete, str_key = cmd)
209 225
210 226 def module_completion(line):
211 227 """
212 228 Returns a list containing the completion possibilities for an import line.
213 229
213 229 The line looks like this:
215 231 'import xml.d'
216 232 'from xml.dom import'
217 233 """
218 234
219 235 words = line.split(' ')
220 236 nwords = len(words)
221 237
222 238 # from whatever <tab> -> 'import '
223 239 if nwords == 3 and words[0] == 'from':
224 240 return ['import ']
225 241
226 242 # 'from xy<tab>' or 'import xy<tab>'
227 243 if nwords < 3 and (words[0] in {'%aimport', 'import', 'from'}) :
228 244 if nwords == 1:
229 245 return get_root_modules()
230 246 mod = words[1].split('.')
231 247 if len(mod) < 2:
232 248 return get_root_modules()
233 249 completion_list = try_import('.'.join(mod[:-1]), True)
234 250 return ['.'.join(mod[:-1] + [el]) for el in completion_list]
235 251
236 252 # 'from xyz import abc<tab>'
237 253 if nwords >= 3 and words[0] == 'from':
238 254 mod = words[1]
239 255 return try_import(mod)
240 256
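A usage sketch of the three cases handled above (results depend on the
installed packages)::

    from IPython.core.completerlib import module_completion
    print(module_completion("from xml.dom "))          # ['import ']
    print(module_completion("import xml.d"))           # e.g. ['xml.dom']
    print(module_completion("from xml.dom import "))   # e.g. ['minidom', 'pulldom', ...]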
241 257 #-----------------------------------------------------------------------------
242 258 # Completers
243 259 #-----------------------------------------------------------------------------
244 260 # These all have the func(self, event) signature to be used as custom
245 261 # completers
246 262
247 263 def module_completer(self,event):
248 264 """Give completions after user has typed 'import ...' or 'from ...'"""
249 265
250 266 # This works in all versions of python. While 2.5 has
251 267 # pkgutil.walk_packages(), that particular routine is fairly dangerous,
252 268 # since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full
253 269 # of possibly problematic side effects.
254 270 # This searches the folders in sys.path for available modules.
255 271
256 272 return module_completion(event.line)
257 273
258 274 # FIXME: there's a lot of logic common to the run, cd and builtin file
259 275 # completers, that is currently reimplemented in each.
260 276
261 277 def magic_run_completer(self, event):
262 278 """Complete files that end in .py or .ipy or .ipynb for the %run command.
263 279 """
264 280 comps = arg_split(event.line, strict=False)
265 281 # relpath should be the current token that we need to complete.
266 282 if (len(comps) > 1) and (not event.line.endswith(' ')):
267 283 relpath = comps[-1].strip("'\"")
268 284 else:
269 285 relpath = ''
270 286
271 287 #print("\nev=", event) # dbg
272 288 #print("rp=", relpath) # dbg
273 289 #print('comps=', comps) # dbg
274 290
275 291 lglob = glob.glob
276 292 isdir = os.path.isdir
277 293 relpath, tilde_expand, tilde_val = expand_user(relpath)
278 294
279 295 # Find if the user has already typed the first filename, after which we
280 296 # should complete on all files, since after the first one other files may
281 297 # be arguments to the input script.
282 298
283 299 if any(magic_run_re.match(c) for c in comps):
284 300 matches = [f.replace('\\','/') + ('/' if isdir(f) else '')
285 301 for f in lglob(relpath+'*')]
286 302 else:
287 303 dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)]
288 304 pys = [f.replace('\\','/')
289 305 for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
290 306 lglob(relpath+'*.ipynb') + lglob(relpath + '*.pyw')]
291 307
292 308 matches = dirs + pys
293 309
294 310 #print('run comp:', dirs+pys) # dbg
295 311 return [compress_user(p, tilde_expand, tilde_val) for p in matches]
296 312
297 313
298 314 def cd_completer(self, event):
299 315 """Completer function for cd, which only returns directories."""
300 316 ip = get_ipython()
301 317 relpath = event.symbol
302 318
303 319 #print(event) # dbg
304 320 if event.line.endswith('-b') or ' -b ' in event.line:
305 321 # return only bookmark completions
306 322 bkms = self.db.get('bookmarks', None)
307 323 if bkms:
308 324 return bkms.keys()
309 325 else:
310 326 return []
311 327
312 328 if event.symbol == '-':
313 329 width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
314 330 # jump in directory history by number
315 331 fmt = '-%0' + width_dh +'d [%s]'
316 332 ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]
317 333 if len(ents) > 1:
318 334 return ents
319 335 return []
320 336
321 337 if event.symbol.startswith('--'):
322 338 return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
323 339
324 340 # Expand ~ in path and normalize directory separators.
325 341 relpath, tilde_expand, tilde_val = expand_user(relpath)
326 342 relpath = relpath.replace('\\','/')
327 343
328 344 found = []
329 345 for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*')
330 346 if os.path.isdir(f)]:
331 347 if ' ' in d:
332 348 # we don't want to deal with any of that, complex code
333 349 # for this is elsewhere
334 350 raise TryNext
335 351
336 352 found.append(d)
337 353
338 354 if not found:
339 355 if os.path.isdir(relpath):
340 356 return [compress_user(relpath, tilde_expand, tilde_val)]
341 357
342 358 # if no completions so far, try bookmarks
343 359 bks = self.db.get('bookmarks',{})
344 360 bkmatches = [s for s in bks if s.startswith(event.symbol)]
345 361 if bkmatches:
346 362 return bkmatches
347 363
348 364 raise TryNext
349 365
350 366 return [compress_user(p, tilde_expand, tilde_val) for p in found]
351 367
352 368 def reset_completer(self, event):
353 369 "A completer for %reset magic"
354 370 return '-f -s in out array dhist'.split()
@@ -1,229 +1,223 b''
1 1 # encoding: utf-8
2 2 """sys.excepthook for IPython itself, leaves a detailed report on disk.
3 3
4 4 Authors:
5 5
6 6 * Fernando Perez
7 7 * Brian E. Granger
8 8 """
9 9
10 10 #-----------------------------------------------------------------------------
11 11 # Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
12 12 # Copyright (C) 2008-2011 The IPython Development Team
13 13 #
14 14 # Distributed under the terms of the BSD License. The full license is in
15 15 # the file COPYING, distributed as part of this software.
16 16 #-----------------------------------------------------------------------------
17 17
18 18 #-----------------------------------------------------------------------------
19 19 # Imports
20 20 #-----------------------------------------------------------------------------
21 21
22 22 import os
23 23 import sys
24 24 import traceback
25 25 from pprint import pformat
26 26 from pathlib import Path
27 27
28 28 from IPython.core import ultratb
29 29 from IPython.core.release import author_email
30 30 from IPython.utils.sysinfo import sys_info
31 31 from IPython.utils.py3compat import input
32 32
33 33 from IPython.core.release import __version__ as version
34 34
35 35 #-----------------------------------------------------------------------------
36 36 # Code
37 37 #-----------------------------------------------------------------------------
38 38
39 39 # Template for the user message.
40 40 _default_message_template = """\
41 41 Oops, {app_name} crashed. We do our best to make it stable, but...
42 42
43 43 A crash report was automatically generated with the following information:
44 44 - A verbatim copy of the crash traceback.
45 45 - A copy of your input history during this session.
46 46 - Data on your current {app_name} configuration.
47 47
48 48 It was left in the file named:
49 49 \t'{crash_report_fname}'
50 50 If you can email this file to the developers, the information in it will help
51 51 them in understanding and correcting the problem.
52 52
53 53 You can mail it to: {contact_name} at {contact_email}
54 54 with the subject '{app_name} Crash Report'.
55 55
56 56 If you want to do it now, the following command will work (under Unix):
57 57 mail -s '{app_name} Crash Report' {contact_email} < {crash_report_fname}
58 58
59 59 In your email, please also include information about:
60 60 - The operating system under which the crash happened: Linux, macOS, Windows,
61 61 other, and which exact version (for example: Ubuntu 16.04.3, macOS 10.13.2,
62 62 Windows 10 Pro), and whether it is 32-bit or 64-bit;
63 63 - How {app_name} was installed: using pip or conda, from GitHub, as part of
64 64 a Docker container, or other, providing more detail if possible;
65 65 - How to reproduce the crash: what exact sequence of instructions can one
66 66 input to get the same crash? Ideally, find a minimal yet complete sequence
67 67 of instructions that yields the crash.
68 68
69 69 To ensure accurate tracking of this issue, please file a report about it at:
70 70 {bug_tracker}
71 71 """
72 72
73 73 _lite_message_template = """
74 74 If you suspect this is an IPython {version} bug, please report it at:
75 75 https://github.com/ipython/ipython/issues
76 76 or send an email to the mailing list at {email}
77 77
78 78 You can print a more detailed traceback right now with "%tb", or use "%debug"
79 79 to interactively debug it.
80 80
81 81 Extra-detailed tracebacks for bug-reporting purposes can be enabled via:
82 82 {config}Application.verbose_crash=True
83 83 """
84 84
85 85
86 86 class CrashHandler(object):
87 87 """Customizable crash handlers for IPython applications.
88 88
89 89 Instances of this class provide a :meth:`__call__` method which can be
90 90 used as a ``sys.excepthook``. The :meth:`__call__` signature is::
91 91
92 92 def __call__(self, etype, evalue, etb)
93 93 """
94 94
95 95 message_template = _default_message_template
96 96 section_sep = '\n\n'+'*'*75+'\n\n'
97 97
98 98 def __init__(self, app, contact_name=None, contact_email=None,
99 99 bug_tracker=None, show_crash_traceback=True, call_pdb=False):
100 100 """Create a new crash handler
101 101
102 102 Parameters
103 103 ----------
104 app : Application
104 app : Application
105 105 A running :class:`Application` instance, which will be queried at
106 106 crash time for internal information.
107
108 107 contact_name : str
109 108 A string with the name of the person to contact.
110
111 109 contact_email : str
112 110 A string with the email address of the contact.
113
114 111 bug_tracker : str
115 112 A string with the URL for your project's bug tracker.
116
117 113 show_crash_traceback : bool
118 114 If false, don't print the crash traceback on stderr, only generate
119 115 the on-disk report
120
121 Non-argument instance attributes:
122
116 Non-argument instance attributes
123 117 These instances contain some non-argument attributes which allow for
124 118 further customization of the crash handler's behavior. Please see the
125 119 source for further details.
126 120 """
127 121 self.crash_report_fname = "Crash_report_%s.txt" % app.name
128 122 self.app = app
129 123 self.call_pdb = call_pdb
130 124 #self.call_pdb = True # dbg
131 125 self.show_crash_traceback = show_crash_traceback
132 126 self.info = dict(app_name = app.name,
133 127 contact_name = contact_name,
134 128 contact_email = contact_email,
135 129 bug_tracker = bug_tracker,
136 130 crash_report_fname = self.crash_report_fname)
137 131
138 132
139 133 def __call__(self, etype, evalue, etb):
140 134 """Handle an exception, call for compatible with sys.excepthook"""
141 135
142 136 # do not allow the crash handler to be called twice without reinstalling it
143 137 # this prevents unlikely errors in the crash handling from entering an
144 138 # infinite loop.
145 139 sys.excepthook = sys.__excepthook__
146 140
147 141 # Report tracebacks shouldn't use color in general (safer for users)
148 142 color_scheme = 'NoColor'
149 143
150 144 # Use this ONLY for developer debugging (keep commented out for release)
151 145 #color_scheme = 'Linux' # dbg
152 146 try:
153 147 rptdir = self.app.ipython_dir
154 148 except:
155 149 rptdir = Path.cwd()
156 150 if rptdir is None or not Path.is_dir(rptdir):
157 151 rptdir = Path.cwd()
158 152 report_name = rptdir / self.crash_report_fname
159 153 # write the report filename into the instance dict so it can get
160 154 # properly expanded out in the user message template
161 155 self.crash_report_fname = report_name
162 156 self.info['crash_report_fname'] = report_name
163 157 TBhandler = ultratb.VerboseTB(
164 158 color_scheme=color_scheme,
165 159 long_header=1,
166 160 call_pdb=self.call_pdb,
167 161 )
168 162 if self.call_pdb:
169 163 TBhandler(etype,evalue,etb)
170 164 return
171 165 else:
172 166 traceback = TBhandler.text(etype,evalue,etb,context=31)
173 167
174 168 # print traceback to screen
175 169 if self.show_crash_traceback:
176 170 print(traceback, file=sys.stderr)
177 171
178 172 # and generate a complete report on disk
179 173 try:
180 174 report = open(report_name,'w')
181 175 except:
182 176 print('Could not create crash report on disk.', file=sys.stderr)
183 177 return
184 178
185 179 with report:
186 180 # Inform user on stderr of what happened
187 181 print('\n'+'*'*70+'\n', file=sys.stderr)
188 182 print(self.message_template.format(**self.info), file=sys.stderr)
189 183
190 184 # Construct report on disk
191 185 report.write(self.make_report(traceback))
192 186
193 187 input("Hit <Enter> to quit (your terminal may close):")
194 188
195 189 def make_report(self,traceback):
196 190 """Return a string containing a crash report."""
197 191
198 192 sec_sep = self.section_sep
199 193
200 194 report = ['*'*75+'\n\n'+'IPython post-mortem report\n\n']
201 195 rpt_add = report.append
202 196 rpt_add(sys_info())
203 197
204 198 try:
205 199 config = pformat(self.app.config)
206 200 rpt_add(sec_sep)
207 201 rpt_add('Application name: %s\n\n' % self.app_name)
208 202 rpt_add('Current user configuration structure:\n\n')
209 203 rpt_add(config)
210 204 except:
211 205 pass
212 206 rpt_add(sec_sep+'Crash traceback:\n\n' + traceback)
213 207
214 208 return ''.join(report)
215 209
216 210
217 211 def crash_handler_lite(etype, evalue, tb):
218 212 """a light excepthook, adding a small message to the usual traceback"""
219 213 traceback.print_exception(etype, evalue, tb)
220 214
221 215 from IPython.core.interactiveshell import InteractiveShell
222 216 if InteractiveShell.initialized():
223 217 # we are in a Shell environment, give %magic example
224 218 config = "%config "
225 219 else:
226 220 # we are not in a shell, show generic config
227 221 config = "c."
228 222 print(_lite_message_template.format(email=author_email, config=config, version=version), file=sys.stderr)
229 223
@@ -1,857 +1,972 b''
1 1 # -*- coding: utf-8 -*-
2 2 """
3 3 Pdb debugger class.
4 4
5 5 Modified from the standard pdb.Pdb class to avoid including readline, so that
6 6 the command line completion of other programs which include this isn't
7 7 damaged.
8 8
9 9 In the future, this class will be expanded with improvements over the standard
10 10 pdb.
11 11
12 12 The code in this file is mainly lifted out of cmd.py in Python 2.2, with minor
13 13 changes. Licensing should therefore be under the standard Python terms. For
14 14 details on the PSF (Python Software Foundation) standard license, see:
15 15
16 16 https://docs.python.org/2/license.html
17 17 """
18 18
19 19 #*****************************************************************************
20 20 #
21 21 # This file is licensed under the PSF license.
22 22 #
23 23 # Copyright (C) 2001 Python Software Foundation, www.python.org
24 24 # Copyright (C) 2005-2006 Fernando Perez. <fperez@colorado.edu>
25 25 #
26 26 #
27 27 #*****************************************************************************
28 28
29 29 import bdb
30 30 import functools
31 31 import inspect
32 32 import linecache
33 33 import sys
34 34 import warnings
35 35 import re
36 import os
36 37
37 38 from IPython import get_ipython
38 39 from IPython.utils import PyColorize
39 40 from IPython.utils import coloransi, py3compat
40 41 from IPython.core.excolors import exception_colors
41 42 from IPython.testing.skipdoctest import skip_doctest
42 43
43 44
44 45 prompt = 'ipdb> '
45 46
46 47 # We have to check this directly from sys.argv, config struct not yet available
47 48 from pdb import Pdb as OldPdb
48 49
49 50 # Allow the set_trace code to operate outside of an ipython instance, even if
50 51 # it does so with some limitations. The rest of this support is implemented in
51 52 # the Tracer constructor.
52 53
53 54
54 55 def make_arrow(pad):
55 56 """generate the leading arrow in front of traceback or debugger"""
56 57 if pad >= 2:
57 58 return '-'*(pad-2) + '> '
58 59 elif pad == 1:
59 60 return '>'
60 61 return ''
61 62
62 63
63 64 def BdbQuit_excepthook(et, ev, tb, excepthook=None):
64 65 """Exception hook which handles `BdbQuit` exceptions.
65 66
66 67 All other exceptions are processed using the `excepthook`
67 68 parameter.
68 69 """
69 70 warnings.warn("`BdbQuit_excepthook` is deprecated since version 5.1",
70 71 DeprecationWarning, stacklevel=2)
71 72 if et == bdb.BdbQuit:
72 73 print('Exiting Debugger.')
73 74 elif excepthook is not None:
74 75 excepthook(et, ev, tb)
75 76 else:
76 77 # Backwards compatibility. Raise deprecation warning?
77 78 BdbQuit_excepthook.excepthook_ori(et, ev, tb)
78 79
79 80
80 81 def BdbQuit_IPython_excepthook(self, et, ev, tb, tb_offset=None):
81 82 warnings.warn(
82 83 "`BdbQuit_IPython_excepthook` is deprecated since version 5.1",
83 84 DeprecationWarning, stacklevel=2)
84 85 print('Exiting Debugger.')
85 86
86 87
87 88 class Tracer(object):
88 89 """
89 90 DEPRECATED
90 91
91 92 Class for local debugging, similar to pdb.set_trace.
92 93
93 94 Instances of this class, when called, behave like pdb.set_trace, but
94 95 providing IPython's enhanced capabilities.
95 96
96 97 This is implemented as a class which must be initialized in your own code
97 98 and not as a standalone function because we need to detect at runtime
98 99 whether IPython is already active or not. That detection is done in the
99 100 constructor, ensuring that this code plays nicely with a running IPython,
100 101 while functioning acceptably (though with limitations) if outside of it.
101 102 """
102 103
103 104 @skip_doctest
104 105 def __init__(self, colors=None):
105 106 """
106 107 DEPRECATED
107 108
108 109 Create a local debugger instance.
109 110
110 111 Parameters
111 112 ----------
112
113 113 colors : str, optional
114 114 The name of the color scheme to use, it must be one of IPython's
115 115 valid color schemes. If not given, the function will default to
116 116 the current IPython scheme when running inside IPython, and to
117 117 'NoColor' otherwise.
118 118
119 119 Examples
120 120 --------
121 121 ::
122 122
123 123 from IPython.core.debugger import Tracer; debug_here = Tracer()
124 124
125 125 Later in your code::
126 126
127 127 debug_here() # -> will open up the debugger at that point.
128 128
129 129 Once the debugger activates, you can use all of its regular commands to
130 130 step through code, set breakpoints, etc. See the pdb documentation
131 131 from the Python standard library for usage details.
132 132 """
133 133 warnings.warn("`Tracer` is deprecated since version 5.1, directly use "
134 134 "`IPython.core.debugger.Pdb.set_trace()`",
135 135 DeprecationWarning, stacklevel=2)
136 136
137 137 ip = get_ipython()
138 138 if ip is None:
139 139 # Outside of ipython, we set our own exception hook manually
140 140 sys.excepthook = functools.partial(BdbQuit_excepthook,
141 141 excepthook=sys.excepthook)
142 142 def_colors = 'NoColor'
143 143 else:
144 144 # In ipython, we use its custom exception handler mechanism
145 145 def_colors = ip.colors
146 146 ip.set_custom_exc((bdb.BdbQuit,), BdbQuit_IPython_excepthook)
147 147
148 148 if colors is None:
149 149 colors = def_colors
150 150
151 151 # The stdlib debugger internally uses a modified repr from the `repr`
152 152 # module, that limits the length of printed strings to a hardcoded
153 153 # limit of 30 characters. That much trimming is too aggressive, let's
154 154 # at least raise that limit to 80 chars, which should be enough for
155 155 # most interactive uses.
156 156 try:
157 157 from reprlib import aRepr
158 158 aRepr.maxstring = 80
159 159 except:
160 160 # This is only a user-facing convenience, so any error we encounter
161 161 # here can be warned about but can be otherwise ignored. These
162 162 # printouts will tell us about problems if this API changes
163 163 import traceback
164 164 traceback.print_exc()
165 165
166 166 self.debugger = Pdb(colors)
167 167
168 168 def __call__(self):
169 169 """Starts an interactive debugger at the point where called.
170 170
171 171 This is similar to the pdb.set_trace() function from the std lib, but
172 172 using IPython's enhanced debugger."""
173 173
174 174 self.debugger.set_trace(sys._getframe().f_back)
175 175
176 176
177 177 RGX_EXTRA_INDENT = re.compile(r'(?<=\n)\s+')
178 178
179 179
180 180 def strip_indentation(multiline_string):
181 181 return RGX_EXTRA_INDENT.sub('', multiline_string)
182 182
183 183
184 184 def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
185 185 """Make new_fn have old_fn's doc string. This is particularly useful
186 186 for the ``do_...`` commands that hook into the help system.
187 187 Adapted from a comp.lang.python posting
188 188 by Duncan Booth."""
189 189 def wrapper(*args, **kw):
190 190 return new_fn(*args, **kw)
191 191 if old_fn.__doc__:
192 192 wrapper.__doc__ = strip_indentation(old_fn.__doc__) + additional_text
193 193 return wrapper
194 194
195 195
196 196 class Pdb(OldPdb):
197 197 """Modified Pdb class, does not load readline.
198 198
199 199 for a standalone version that uses prompt_toolkit, see
200 200 `IPython.terminal.debugger.TerminalPdb` and
201 201 `IPython.terminal.debugger.set_trace()`
202
203
204 This debugger can hide and skip frames that are tagged according to some predicates.
205 See the `skip_predicates` command.
206
202 207 """
203 208
209 default_predicates = {"tbhide": True, "readonly": False, "ipython_internal": True}
210
204 211 def __init__(self, color_scheme=None, completekey=None,
205 212 stdin=None, stdout=None, context=5, **kwargs):
206 213 """Create a new IPython debugger.
207 214
208 :param color_scheme: Deprecated, do not use.
209 :param completekey: Passed to pdb.Pdb.
210 :param stdin: Passed to pdb.Pdb.
211 :param stdout: Passed to pdb.Pdb.
212 :param context: Number of lines of source code context to show when
215 Parameters
216 ----------
217 color_scheme : default None
218 Deprecated, do not use.
219 completekey : default None
220 Passed to pdb.Pdb.
221 stdin : default None
222 Passed to pdb.Pdb.
223 stdout : default None
224 Passed to pdb.Pdb.
225 context : int
226 Number of lines of source code context to show when
213 227 displaying stacktrace information.
214 :param kwargs: Passed to pdb.Pdb.
215 The possibilities are python version dependent, see the python
216 docs for more info.
228 **kwargs
229 Passed to pdb.Pdb.
230
231 Notes
232 -----
233 The possibilities are python version dependent, see the python
234 docs for more info.
217 235 """
218 236
219 237 # Parent constructor:
220 238 try:
221 239 self.context = int(context)
222 240 if self.context <= 0:
223 241 raise ValueError("Context must be a positive integer")
224 242 except (TypeError, ValueError) as e:
225 243 raise ValueError("Context must be a positive integer") from e
226 244
227 245 # `kwargs` ensures full compatibility with stdlib's `pdb.Pdb`.
228 246 OldPdb.__init__(self, completekey, stdin, stdout, **kwargs)
229 247
230 248 # IPython changes...
231 249 self.shell = get_ipython()
232 250
233 251 if self.shell is None:
234 252 save_main = sys.modules['__main__']
235 253 # No IPython instance running, we must create one
236 254 from IPython.terminal.interactiveshell import \
237 255 TerminalInteractiveShell
238 256 self.shell = TerminalInteractiveShell.instance()
239 257 # needed by any code which calls __import__("__main__") after
240 258 # the debugger was entered. See also #9941.
241 259 sys.modules["__main__"] = save_main
242 260
243 261 if color_scheme is not None:
244 262 warnings.warn(
245 263 "The `color_scheme` argument is deprecated since version 5.1",
246 264 DeprecationWarning, stacklevel=2)
247 265 else:
248 266 color_scheme = self.shell.colors
249 267
250 268 self.aliases = {}
251 269
252 270 # Create color table: we copy the default one from the traceback
253 271 # module and add a few attributes needed for debugging
254 272 self.color_scheme_table = exception_colors()
255 273
256 274 # shorthands
257 275 C = coloransi.TermColors
258 276 cst = self.color_scheme_table
259 277
260 278 cst['NoColor'].colors.prompt = C.NoColor
261 279 cst['NoColor'].colors.breakpoint_enabled = C.NoColor
262 280 cst['NoColor'].colors.breakpoint_disabled = C.NoColor
263 281
264 282 cst['Linux'].colors.prompt = C.Green
265 283 cst['Linux'].colors.breakpoint_enabled = C.LightRed
266 284 cst['Linux'].colors.breakpoint_disabled = C.Red
267 285
268 286 cst['LightBG'].colors.prompt = C.Blue
269 287 cst['LightBG'].colors.breakpoint_enabled = C.LightRed
270 288 cst['LightBG'].colors.breakpoint_disabled = C.Red
271 289
272 290 cst['Neutral'].colors.prompt = C.Blue
273 291 cst['Neutral'].colors.breakpoint_enabled = C.LightRed
274 292 cst['Neutral'].colors.breakpoint_disabled = C.Red
275 293
276 294 # Add a python parser so we can syntax highlight source while
277 295 # debugging.
278 296 self.parser = PyColorize.Parser(style=color_scheme)
279 297 self.set_colors(color_scheme)
280 298
281 299 # Set the prompt - the default prompt is '(Pdb)'
282 300 self.prompt = prompt
283 301 self.skip_hidden = True
302 self.report_skipped = True
303
304 # list of predicates we use to skip frames
305 self._predicates = self.default_predicates
284 306
285 307 def set_colors(self, scheme):
286 308 """Shorthand access to the color table scheme selector method."""
287 309 self.color_scheme_table.set_active_scheme(scheme)
288 310 self.parser.style = scheme
289 311
290 312 def set_trace(self, frame=None):
291 313 if frame is None:
292 314 frame = sys._getframe().f_back
293 315 self.initial_frame = frame
294 316 return super().set_trace(frame)
295 317
318 def _hidden_predicate(self, frame):
319 """
320 Given a frame, return whether it should be hidden or not by IPython.
321 """
322
323 if self._predicates["readonly"]:
324 fname = frame.f_code.co_filename
325 # we need to check for file existence; interactively defined
326 # functions would otherwise appear as read-only.
327 if os.path.isfile(fname) and not os.access(fname, os.W_OK):
328 return True
329
330 if self._predicates["tbhide"]:
331 if frame in (self.curframe, getattr(self, "initial_frame", None)):
332 return False
333 else:
334 return self._get_frame_locals(frame).get("__tracebackhide__", False)
335
336 return False
337
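A sketch of how code opts into the ``tbhide`` predicate: setting the
``__tracebackhide__`` local marks the frame, and ``where``/``up``/``down``
skip it while ``skip_hidden`` is on::

    def fragile_helper():
        __tracebackhide__ = True  # hide this frame from the IPython debugger
        raise ValueError("boom")

    def public_api():
        return fragile_helper()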
296 338 def hidden_frames(self, stack):
297 339 """
298 340 Given a stack, return for each frame whether it should be skipped.
299 341
300 342 This is used in up/down and where to skip frames.
301 343 """
302 344 # The f_locals dictionary is updated from the actual frame
303 345 # locals whenever the .f_locals accessor is called, so we
304 346 # avoid calling it here to preserve self.curframe_locals.
305 347 # Furthermore, there is no good reason to hide the current frame.
306 ip_hide = [
307 False
308 if s[0] in (self.curframe, getattr(self, "initial_frame", None))
309 else s[0].f_locals.get("__tracebackhide__", False)
310 for s in stack
311 ]
348 ip_hide = [self._hidden_predicate(s[0]) for s in stack]
312 349 ip_start = [i for i, s in enumerate(ip_hide) if s == "__ipython_bottom__"]
313 if ip_start:
350 if ip_start and self._predicates["ipython_internal"]:
314 351 ip_hide = [h if i > ip_start[0] else True for (i, h) in enumerate(ip_hide)]
315 352 return ip_hide
316 353
317 354 def interaction(self, frame, traceback):
318 355 try:
319 356 OldPdb.interaction(self, frame, traceback)
320 357 except KeyboardInterrupt:
321 358 self.stdout.write("\n" + self.shell.get_exception_only())
322 359
323 360 def precmd(self, line):
324 361 """Perform useful escapes on the command before it is executed."""
325 362
326 363 if line.endswith("??"):
327 364 line = "pinfo2 " + line[:-2]
328 365 elif line.endswith("?"):
329 366 line = "pinfo " + line[:-1]
330 367
331 368 line = super().precmd(line)
332 369
333 370 return line
334 371
335 372 def new_do_frame(self, arg):
336 373 OldPdb.do_frame(self, arg)
337 374
338 375 def new_do_quit(self, arg):
339 376
340 377 if hasattr(self, 'old_all_completions'):
341 378 self.shell.Completer.all_completions = self.old_all_completions
342 379
343 380 return OldPdb.do_quit(self, arg)
344 381
345 382 do_q = do_quit = decorate_fn_with_doc(new_do_quit, OldPdb.do_quit)
346 383
347 384 def new_do_restart(self, arg):
348 385 """Restart command. In the context of ipython this is exactly the same
349 386 thing as 'quit'."""
350 387 self.msg("Restart doesn't make sense here. Using 'quit' instead.")
351 388 return self.do_quit(arg)
352 389
353 390 def print_stack_trace(self, context=None):
354 391 Colors = self.color_scheme_table.active_colors
355 392 ColorsNormal = Colors.Normal
356 393 if context is None:
357 394 context = self.context
358 395 try:
359 396 context = int(context)
360 397 if context <= 0:
361 398 raise ValueError("Context must be a positive integer")
362 399 except (TypeError, ValueError) as e:
363 400 raise ValueError("Context must be a positive integer") from e
364 401 try:
365 402 skipped = 0
366 403 for hidden, frame_lineno in zip(self.hidden_frames(self.stack), self.stack):
367 404 if hidden and self.skip_hidden:
368 405 skipped += 1
369 406 continue
370 407 if skipped:
371 408 print(
372 409 f"{Colors.excName} [... skipping {skipped} hidden frame(s)]{ColorsNormal}\n"
373 410 )
374 411 skipped = 0
375 412 self.print_stack_entry(frame_lineno, context=context)
376 413 if skipped:
377 414 print(
378 415 f"{Colors.excName} [... skipping {skipped} hidden frame(s)]{ColorsNormal}\n"
379 416 )
380 417 except KeyboardInterrupt:
381 418 pass
382 419
383 420 def print_stack_entry(self, frame_lineno, prompt_prefix='\n-> ',
384 421 context=None):
385 422 if context is None:
386 423 context = self.context
387 424 try:
388 425 context = int(context)
389 426 if context <= 0:
390 427 raise ValueError("Context must be a positive integer")
391 428 except (TypeError, ValueError) as e:
392 429 raise ValueError("Context must be a positive integer") from e
393 430 print(self.format_stack_entry(frame_lineno, '', context), file=self.stdout)
394 431
395 432 # vds: >>
396 433 frame, lineno = frame_lineno
397 434 filename = frame.f_code.co_filename
398 435 self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
399 436 # vds: <<
400 437
438 def _get_frame_locals(self, frame):
439 """ "
440 Acessing f_local of current frame reset the namespace, so we want to avoid
441 that or the following can happend
442
443 ipdb> foo
444 "old"
445 ipdb> foo = "new"
446 ipdb> foo
447 "new"
448 ipdb> where
449 ipdb> foo
450 "old"
451
452 So if frame is self.curframe we instead return self.curframe_locals
453
454 """
455 if frame is self.curframe:
456 return self.curframe_locals
457 else:
458 return frame.f_locals
459
401 460 def format_stack_entry(self, frame_lineno, lprefix=': ', context=None):
402 461 if context is None:
403 462 context = self.context
404 463 try:
405 464 context = int(context)
406 465 if context <= 0:
407 466 print("Context must be a positive integer", file=self.stdout)
408 467 except (TypeError, ValueError):
409 468 print("Context must be a positive integer", file=self.stdout)
410 469
411 470 import reprlib
412 471
413 472 ret = []
414 473
415 474 Colors = self.color_scheme_table.active_colors
416 475 ColorsNormal = Colors.Normal
417 476 tpl_link = "%s%%s%s" % (Colors.filenameEm, ColorsNormal)
418 477 tpl_call = "%s%%s%s%%s%s" % (Colors.vName, Colors.valEm, ColorsNormal)
419 478 tpl_line = "%%s%s%%s %s%%s" % (Colors.lineno, ColorsNormal)
420 479 tpl_line_em = "%%s%s%%s %s%%s%s" % (Colors.linenoEm, Colors.line, ColorsNormal)
421 480
422 481 frame, lineno = frame_lineno
423 482
424 483 return_value = ''
425 if '__return__' in frame.f_locals:
426 rv = frame.f_locals['__return__']
427 #return_value += '->'
428 return_value += reprlib.repr(rv) + '\n'
484 loc_frame = self._get_frame_locals(frame)
485 if "__return__" in loc_frame:
486 rv = loc_frame["__return__"]
487 # return_value += '->'
488 return_value += reprlib.repr(rv) + "\n"
429 489 ret.append(return_value)
430 490
431 491 #s = filename + '(' + `lineno` + ')'
432 492 filename = self.canonic(frame.f_code.co_filename)
433 493 link = tpl_link % py3compat.cast_unicode(filename)
434 494
435 495 if frame.f_code.co_name:
436 496 func = frame.f_code.co_name
437 497 else:
438 498 func = "<lambda>"
439 499
440 call = ''
441 if func != '?':
442 if '__args__' in frame.f_locals:
443 args = reprlib.repr(frame.f_locals['__args__'])
500 call = ""
501 if func != "?":
502 if "__args__" in loc_frame:
503 args = reprlib.repr(loc_frame["__args__"])
444 504 else:
445 505 args = '()'
446 506 call = tpl_call % (func, args)
447 507
448 508 # The level info should be generated in the same format pdb uses, to
449 509 # avoid breaking the pdbtrack functionality of python-mode in *emacs.
450 510 if frame is self.curframe:
451 511 ret.append('> ')
452 512 else:
453 513 ret.append(" ")
454 514 ret.append("%s(%s)%s\n" % (link, lineno, call))
455 515
456 516 start = lineno - 1 - context//2
457 517 lines = linecache.getlines(filename)
458 518 start = min(start, len(lines) - context)
459 519 start = max(start, 0)
460 520 lines = lines[start : start + context]
461 521
462 522 for i, line in enumerate(lines):
463 523 show_arrow = start + 1 + i == lineno
464 524 linetpl = (frame is self.curframe or show_arrow) and tpl_line_em or tpl_line
465 525 ret.append(
466 526 self.__format_line(
467 527 linetpl, filename, start + 1 + i, line, arrow=show_arrow
468 528 )
469 529 )
470 530 return "".join(ret)
471 531
472 532 def __format_line(self, tpl_line, filename, lineno, line, arrow=False):
473 533 bp_mark = ""
474 534 bp_mark_color = ""
475 535
476 536 new_line, err = self.parser.format2(line, 'str')
477 537 if not err:
478 538 line = new_line
479 539
480 540 bp = None
481 541 if lineno in self.get_file_breaks(filename):
482 542 bps = self.get_breaks(filename, lineno)
483 543 bp = bps[-1]
484 544
485 545 if bp:
486 546 Colors = self.color_scheme_table.active_colors
487 547 bp_mark = str(bp.number)
488 548 bp_mark_color = Colors.breakpoint_enabled
489 549 if not bp.enabled:
490 550 bp_mark_color = Colors.breakpoint_disabled
491 551
492 552 numbers_width = 7
493 553 if arrow:
494 554 # This is the line with the error
495 555 pad = numbers_width - len(str(lineno)) - len(bp_mark)
496 556 num = '%s%s' % (make_arrow(pad), str(lineno))
497 557 else:
498 558 num = '%*s' % (numbers_width - len(bp_mark), str(lineno))
499 559
500 560 return tpl_line % (bp_mark_color + bp_mark, num, line)
501 561
502 562 def print_list_lines(self, filename, first, last):
503 563 """The printing (as opposed to the parsing part of a 'list'
504 564 command."""
505 565 try:
506 566 Colors = self.color_scheme_table.active_colors
507 567 ColorsNormal = Colors.Normal
508 568 tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
509 569 tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)
510 570 src = []
511 571 if filename == "<string>" and hasattr(self, "_exec_filename"):
512 572 filename = self._exec_filename
513 573
514 574 for lineno in range(first, last+1):
515 575 line = linecache.getline(filename, lineno)
516 576 if not line:
517 577 break
518 578
519 579 if lineno == self.curframe.f_lineno:
520 580 line = self.__format_line(
521 581 tpl_line_em, filename, lineno, line, arrow=True
522 582 )
523 583 else:
524 584 line = self.__format_line(
525 585 tpl_line, filename, lineno, line, arrow=False
526 586 )
527 587
528 588 src.append(line)
529 589 self.lineno = lineno
530 590
531 591 print(''.join(src), file=self.stdout)
532 592
533 593 except KeyboardInterrupt:
534 594 pass
535 595
596 def do_skip_predicates(self, args):
597 """
598 Turn on/off individual predicates controlling whether a frame should be hidden or skipped.
599
600 The global option to skip (or not) hidden frames is set with ``skip_hidden``.
601
602 To change the value of a predicate::
603
604 skip_predicates key [true|false]
605
606 Call without arguments to see the current values.
607
608 To permanently change the value of an option add the corresponding
609 command to your ``~/.pdbrc`` file. If you are programmatically using the
610 Pdb instance you can also change the ``default_predicates`` class
611 attribute.
612 """
613 if not args.strip():
614 print("current predicates:")
615 for (p, v) in self._predicates.items():
616 print(" ", p, ":", v)
617 return
618 type_value = args.strip().split(" ")
619 if len(type_value) != 2:
620 print(
621 f"Usage: skip_predicates <type> <value>, with <type> one of {set(self._predicates.keys())}"
622 )
623 return
624
625 type_, value = type_value
626 if type_ not in self._predicates:
627 print(f"{type_!r} not in {set(self._predicates.keys())}")
628 return
629 if value.lower() not in ("true", "yes", "1", "no", "false", "0"):
630 print(
631 f"{value!r} is invalid - use one of ('true', 'yes', '1', 'no', 'false', '0')"
632 )
633 return
634
635 self._predicates[type_] = value.lower() in ("true", "yes", "1")
636 if not any(self._predicates.values()):
637 print(
638 "Warning, all predicates set to False, skip_hidden may not have any effects."
639 )
640
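Taken together with ``do_skip_hidden`` below, these commands can also be driven programmatically; a minimal sketch (the ``onecmd`` plumbing comes from the standard ``cmd`` module, and the predicate key ``tbhide`` is an assumption, the real keys come from ``Pdb.default_predicates``)::

    from IPython.core.debugger import Pdb

    p = Pdb()
    p.onecmd("skip_predicates")               # print the current predicate values
    p.onecmd("skip_predicates tbhide false")  # 'tbhide' is assumed; unknown keys just print an error
    p.onecmd("skip_hidden false")             # turn off frame skipping entirely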
536 641 def do_skip_hidden(self, arg):
537 642 """
538 643 Change whether or not we should skip frames with the
539 644 __tracebackhide__ attribute.
540 645 """
541 if arg.strip().lower() in ("true", "yes"):
646 if not arg.strip():
647 print(
648 f"skip_hidden = {self.skip_hidden}, use 'yes','no', 'true', or 'false' to change."
649 )
650 elif arg.strip().lower() in ("true", "yes"):
542 651 self.skip_hidden = True
543 652 elif arg.strip().lower() in ("false", "no"):
544 653 self.skip_hidden = False
654 if not any(self._predicates.values()):
655 print(
656 "Warning, all predicates set to False, skip_hidden may not have any effects."
657 )
545 658
546 659 def do_list(self, arg):
547 660 """Print lines of code from the current stack frame
548 661 """
549 662 self.lastcmd = 'list'
550 663 last = None
551 664 if arg:
552 665 try:
553 666 x = eval(arg, {}, {})
554 667 if type(x) == type(()):
555 668 first, last = x
556 669 first = int(first)
557 670 last = int(last)
558 671 if last < first:
559 672 # Assume it's a count
560 673 last = first + last
561 674 else:
562 675 first = max(1, int(x) - 5)
563 676 except:
564 677 print('*** Error in argument:', repr(arg), file=self.stdout)
565 678 return
566 679 elif self.lineno is None:
567 680 first = max(1, self.curframe.f_lineno - 5)
568 681 else:
569 682 first = self.lineno + 1
570 683 if last is None:
571 684 last = first + 10
572 685 self.print_list_lines(self.curframe.f_code.co_filename, first, last)
573 686
574 687 # vds: >>
575 688 lineno = first
576 689 filename = self.curframe.f_code.co_filename
577 690 self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
578 691 # vds: <<
579 692
580 693 do_l = do_list
581 694
582 695 def getsourcelines(self, obj):
583 696 lines, lineno = inspect.findsource(obj)
584 if inspect.isframe(obj) and obj.f_globals is obj.f_locals:
697 if inspect.isframe(obj) and obj.f_globals is self._get_frame_locals(obj):
585 698 # must be a module frame: do not try to cut a block out of it
586 699 return lines, 1
587 700 elif inspect.ismodule(obj):
588 701 return lines, 1
589 702 return inspect.getblock(lines[lineno:]), lineno+1
590 703
591 704 def do_longlist(self, arg):
592 705 """Print lines of code from the current stack frame.
593 706
594 707 Shows more lines than 'list' does.
595 708 """
596 709 self.lastcmd = 'longlist'
597 710 try:
598 711 lines, lineno = self.getsourcelines(self.curframe)
599 712 except OSError as err:
600 713 self.error(err)
601 714 return
602 715 last = lineno + len(lines)
603 716 self.print_list_lines(self.curframe.f_code.co_filename, lineno, last)
604 717 do_ll = do_longlist
605 718
606 719 def do_debug(self, arg):
607 720 """debug code
608 721 Enter a recursive debugger that steps through the code
609 722 argument (which is an arbitrary expression or statement to be
610 723 executed in the current environment).
611 724 """
612 725 trace_function = sys.gettrace()
613 726 sys.settrace(None)
614 727 globals = self.curframe.f_globals
615 728 locals = self.curframe_locals
616 729 p = self.__class__(completekey=self.completekey,
617 730 stdin=self.stdin, stdout=self.stdout)
618 731 p.use_rawinput = self.use_rawinput
619 732 p.prompt = "(%s) " % self.prompt.strip()
620 733 self.message("ENTERING RECURSIVE DEBUGGER")
621 734 sys.call_tracing(p.run, (arg, globals, locals))
622 735 self.message("LEAVING RECURSIVE DEBUGGER")
623 736 sys.settrace(trace_function)
624 737 self.lastcmd = p.lastcmd
625 738
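For instance, from a paused ipdb session one can step into an arbitrary call with the recursive debugger; a sketch of the interaction (the function and argument names are placeholders)::

    ipdb> debug some_function(x)
    ENTERING RECURSIVE DEBUGGER
    ...                     # step through the call in the nested debugger, then quit
    LEAVING RECURSIVE DEBUGGER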
626 739 def do_pdef(self, arg):
627 740 """Print the call signature for any callable object.
628 741
629 742 The debugger interface to %pdef"""
630 743 namespaces = [
631 744 ("Locals", self.curframe_locals),
632 745 ("Globals", self.curframe.f_globals),
633 746 ]
634 747 self.shell.find_line_magic("pdef")(arg, namespaces=namespaces)
635 748
636 749 def do_pdoc(self, arg):
637 750 """Print the docstring for an object.
638 751
639 752 The debugger interface to %pdoc."""
640 753 namespaces = [
641 754 ("Locals", self.curframe_locals),
642 755 ("Globals", self.curframe.f_globals),
643 756 ]
644 757 self.shell.find_line_magic("pdoc")(arg, namespaces=namespaces)
645 758
646 759 def do_pfile(self, arg):
647 760 """Print (or run through pager) the file where an object is defined.
648 761
649 762 The debugger interface to %pfile.
650 763 """
651 764 namespaces = [
652 765 ("Locals", self.curframe_locals),
653 766 ("Globals", self.curframe.f_globals),
654 767 ]
655 768 self.shell.find_line_magic("pfile")(arg, namespaces=namespaces)
656 769
657 770 def do_pinfo(self, arg):
658 771 """Provide detailed information about an object.
659 772
660 773 The debugger interface to %pinfo, i.e., obj?."""
661 774 namespaces = [
662 775 ("Locals", self.curframe_locals),
663 776 ("Globals", self.curframe.f_globals),
664 777 ]
665 778 self.shell.find_line_magic("pinfo")(arg, namespaces=namespaces)
666 779
667 780 def do_pinfo2(self, arg):
668 781 """Provide extra detailed information about an object.
669 782
670 783 The debugger interface to %pinfo2, i.e., obj??."""
671 784 namespaces = [
672 785 ("Locals", self.curframe_locals),
673 786 ("Globals", self.curframe.f_globals),
674 787 ]
675 788 self.shell.find_line_magic("pinfo2")(arg, namespaces=namespaces)
676 789
677 790 def do_psource(self, arg):
678 791 """Print (or run through pager) the source code for an object."""
679 792 namespaces = [
680 793 ("Locals", self.curframe_locals),
681 794 ("Globals", self.curframe.f_globals),
682 795 ]
683 796 self.shell.find_line_magic("psource")(arg, namespaces=namespaces)
684 797
685 798 def do_where(self, arg):
686 799 """w(here)
687 800 Print a stack trace, with the most recent frame at the bottom.
688 801 An arrow indicates the "current frame", which determines the
689 802 context of most commands. 'bt' is an alias for this command.
690 803
691 804 Take a number as an (optional) argument for the number of context lines to
692 805 print."""
693 806 if arg:
694 807 try:
695 808 context = int(arg)
696 809 except ValueError as err:
697 810 self.error(err)
698 811 return
699 812 self.print_stack_trace(context)
700 813 else:
701 814 self.print_stack_trace()
702 815
703 816 do_w = do_where
704 817
705 818 def stop_here(self, frame):
706 819 hidden = False
707 820 if self.skip_hidden:
708 hidden = frame.f_locals.get("__tracebackhide__", False)
821 hidden = self._hidden_predicate(frame)
709 822 if hidden:
710 Colors = self.color_scheme_table.active_colors
711 ColorsNormal = Colors.Normal
712 print(f"{Colors.excName} [... skipped 1 hidden frame]{ColorsNormal}\n")
713
823 if self.report_skipped:
824 Colors = self.color_scheme_table.active_colors
825 ColorsNormal = Colors.Normal
826 print(
827 f"{Colors.excName} [... skipped 1 hidden frame]{ColorsNormal}\n"
828 )
714 829 return super().stop_here(frame)
715 830
716 831 def do_up(self, arg):
717 832 """u(p) [count]
718 833 Move the current frame count (default one) levels up in the
719 834 stack trace (to an older frame).
720 835
721 836 Will skip hidden frames.
722 837 """
723 838 # modified version of upstream that skips
724 # frames with __tracebackide__
839 # frames with __tracebackhide__
725 840 if self.curindex == 0:
726 841 self.error("Oldest frame")
727 842 return
728 843 try:
729 844 count = int(arg or 1)
730 845 except ValueError:
731 846 self.error("Invalid frame count (%s)" % arg)
732 847 return
733 848 skipped = 0
734 849 if count < 0:
735 850 _newframe = 0
736 851 else:
737 852 counter = 0
738 853 hidden_frames = self.hidden_frames(self.stack)
739 854 for i in range(self.curindex - 1, -1, -1):
740 855 if hidden_frames[i] and self.skip_hidden:
741 856 skipped += 1
742 857 continue
743 858 counter += 1
744 859 if counter >= count:
745 860 break
746 861 else:
747 862 # if no break occurred.
748 863 self.error(
749 864 "all frames above hidden, use `skip_hidden False` to get get into those."
750 865 )
751 866 return
752 867
753 868 Colors = self.color_scheme_table.active_colors
754 869 ColorsNormal = Colors.Normal
755 870 _newframe = i
756 871 self._select_frame(_newframe)
757 872 if skipped:
758 873 print(
759 874 f"{Colors.excName} [... skipped {skipped} hidden frame(s)]{ColorsNormal}\n"
760 875 )
761 876
762 877 def do_down(self, arg):
763 878 """d(own) [count]
764 879 Move the current frame count (default one) levels down in the
765 880 stack trace (to a newer frame).
766 881
767 882 Will skip hidden frames.
768 883 """
769 884 if self.curindex + 1 == len(self.stack):
770 885 self.error("Newest frame")
771 886 return
772 887 try:
773 888 count = int(arg or 1)
774 889 except ValueError:
775 890 self.error("Invalid frame count (%s)" % arg)
776 891 return
777 892 if count < 0:
778 893 _newframe = len(self.stack) - 1
779 894 else:
780 895 counter = 0
781 896 skipped = 0
782 897 hidden_frames = self.hidden_frames(self.stack)
783 898 for i in range(self.curindex + 1, len(self.stack)):
784 899 if hidden_frames[i] and self.skip_hidden:
785 900 skipped += 1
786 901 continue
787 902 counter += 1
788 903 if counter >= count:
789 904 break
790 905 else:
791 906 self.error(
792 907 "all frames bellow hidden, use `skip_hidden False` to get get into those."
793 908 )
794 909 return
795 910
796 911 Colors = self.color_scheme_table.active_colors
797 912 ColorsNormal = Colors.Normal
798 913 if skipped:
799 914 print(
800 915 f"{Colors.excName} [... skipped {skipped} hidden frame(s)]{ColorsNormal}\n"
801 916 )
802 917 _newframe = i
803 918
804 919 self._select_frame(_newframe)
805 920
806 921 do_d = do_down
807 922 do_u = do_up
808 923
809 924 def do_context(self, context):
810 925 """context number_of_lines
811 926 Set the number of lines of source code to show when displaying
812 927 stacktrace information.
813 928 """
814 929 try:
815 930 new_context = int(context)
816 931 if new_context <= 0:
817 932 raise ValueError()
818 933 self.context = new_context
819 934 except ValueError:
820 935 self.error("The 'context' command requires a positive integer argument.")
821 936
822 937
823 938 class InterruptiblePdb(Pdb):
824 939 """Version of debugger where KeyboardInterrupt exits the debugger altogether."""
825 940
826 941 def cmdloop(self):
827 942 """Wrap cmdloop() such that KeyboardInterrupt stops the debugger."""
828 943 try:
829 944 return OldPdb.cmdloop(self)
830 945 except KeyboardInterrupt:
831 946 self.stop_here = lambda frame: False
832 947 self.do_quit("")
833 948 sys.settrace(None)
834 949 self.quitting = False
835 950 raise
836 951
837 952 def _cmdloop(self):
838 953 while True:
839 954 try:
840 955 # keyboard interrupts allow for an easy way to cancel
841 956 # the current command, so allow them during interactive input
842 957 self.allow_kbdint = True
843 958 self.cmdloop()
844 959 self.allow_kbdint = False
845 960 break
846 961 except KeyboardInterrupt:
847 962 self.message('--KeyboardInterrupt--')
848 963 raise
849 964
850 965
851 966 def set_trace(frame=None):
852 967 """
853 968 Start debugging from `frame`.
854 969
855 970 If frame is not specified, debugging starts from the caller's frame.
856 971 """
857 972 Pdb().set_trace(frame or sys._getframe().f_back)
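A hedged sketch of how this entry point is typically used (the surrounding function and data are placeholders)::

    # drop into the IPython debugger at a chosen point in your own code
    from IPython.core.debugger import set_trace

    def compute(values):
        total = sum(values)
        set_trace()          # execution pauses here, in the caller's frame
        return total / len(values)

    compute([1, 2, 3])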
@@ -1,1234 +1,1256 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Top-level display functions for displaying object in different formats."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7
8 8 from binascii import b2a_base64, hexlify
9 9 import html
10 10 import json
11 11 import mimetypes
12 12 import os
13 13 import struct
14 14 import warnings
15 15 from copy import deepcopy
16 16 from os.path import splitext
17 17 from pathlib import Path, PurePath
18 18
19 19 from IPython.utils.py3compat import cast_unicode
20 20 from IPython.testing.skipdoctest import skip_doctest
21 21 from . import display_functions
22 22
23 23
24 24 __all__ = ['display_pretty', 'display_html', 'display_markdown',
25 25 'display_svg', 'display_png', 'display_jpeg', 'display_latex', 'display_json',
26 26 'display_javascript', 'display_pdf', 'DisplayObject', 'TextDisplayObject',
27 27 'Pretty', 'HTML', 'Markdown', 'Math', 'Latex', 'SVG', 'ProgressBar', 'JSON',
28 28 'GeoJSON', 'Javascript', 'Image', 'set_matplotlib_formats',
29 29 'set_matplotlib_close',
30 30 'Video']
31 31
32 32 _deprecated_names = ["display", "clear_output", "publish_display_data", "update_display", "DisplayHandle"]
33 33
34 34 __all__ = __all__ + _deprecated_names
35 35
36 36
37 37 # ----- warn to import from IPython.display -----
38 38
39 39 from warnings import warn
40 40
41 41
42 42 def __getattr__(name):
43 43 if name in _deprecated_names:
44 44 warn(f"Importing {name} from IPython.core.display is deprecated since IPython 7.14, please import from IPython display", DeprecationWarning, stacklevel=2)
45 45 return getattr(display_functions, name)
46 46
47 47 if name in globals().keys():
48 48 return globals()[name]
49 49 else:
50 50 raise AttributeError(f"module {__name__} has no attribute {name}")
51 51
52 52
53 53 #-----------------------------------------------------------------------------
54 54 # utility functions
55 55 #-----------------------------------------------------------------------------
56 56
57 57 def _safe_exists(path):
58 58 """Check path, but don't let exceptions raise"""
59 59 try:
60 60 return os.path.exists(path)
61 61 except Exception:
62 62 return False
63 63
64 64
65 65 def _display_mimetype(mimetype, objs, raw=False, metadata=None):
66 66 """internal implementation of all display_foo methods
67 67
68 68 Parameters
69 69 ----------
70 70 mimetype : str
71 71 The mimetype to be published (e.g. 'image/png')
72 72 *objs : object
73 73 The Python objects to display, or if raw=True raw text data to
74 74 display.
75 75 raw : bool
76 76 Are the data objects raw data or Python objects that need to be
77 77 formatted before display? [default: False]
78 78 metadata : dict (optional)
79 79 Metadata to be associated with the specific mimetype output.
80 80 """
81 81 if metadata:
82 82 metadata = {mimetype: metadata}
83 83 if raw:
84 84 # turn list of pngdata into list of { 'image/png': pngdata }
85 85 objs = [ {mimetype: obj} for obj in objs ]
86 86 display(*objs, raw=raw, metadata=metadata, include=[mimetype])
87 87
88 88 #-----------------------------------------------------------------------------
89 89 # Main functions
90 90 #-----------------------------------------------------------------------------
91 91
92 92
93 93 def display_pretty(*objs, **kwargs):
94 94 """Display the pretty (default) representation of an object.
95 95
96 96 Parameters
97 97 ----------
98 98 *objs : object
99 99 The Python objects to display, or if raw=True raw text data to
100 100 display.
101 101 raw : bool
102 102 Are the data objects raw data or Python objects that need to be
103 103 formatted before display? [default: False]
104 104 metadata : dict (optional)
105 105 Metadata to be associated with the specific mimetype output.
106 106 """
107 107 _display_mimetype('text/plain', objs, **kwargs)
108 108
109 109
110 110 def display_html(*objs, **kwargs):
111 111 """Display the HTML representation of an object.
112 112
113 113 Note: If raw=False and the object does not have an HTML
114 114 representation, no HTML will be shown.
115 115
116 116 Parameters
117 117 ----------
118 118 *objs : object
119 119 The Python objects to display, or if raw=True raw HTML data to
120 120 display.
121 121 raw : bool
122 122 Are the data objects raw data or Python objects that need to be
123 123 formatted before display? [default: False]
124 124 metadata : dict (optional)
125 125 Metadata to be associated with the specific mimetype output.
126 126 """
127 127 _display_mimetype('text/html', objs, **kwargs)
128 128
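A quick, hedged illustration of the raw path (the HTML fragment shown is arbitrary)::

    from IPython.display import display_html

    # publish a ready-made HTML fragment without going through a formatter
    display_html("<b>done:</b> 3 files processed", raw=True)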
129 129
130 130 def display_markdown(*objs, **kwargs):
131 131 """Displays the Markdown representation of an object.
132 132
133 133 Parameters
134 134 ----------
135 135 *objs : object
136 136 The Python objects to display, or if raw=True raw markdown data to
137 137 display.
138 138 raw : bool
139 139 Are the data objects raw data or Python objects that need to be
140 140 formatted before display? [default: False]
141 141 metadata : dict (optional)
142 142 Metadata to be associated with the specific mimetype output.
143 143 """
144 144
145 145 _display_mimetype('text/markdown', objs, **kwargs)
146 146
147 147
148 148 def display_svg(*objs, **kwargs):
149 149 """Display the SVG representation of an object.
150 150
151 151 Parameters
152 152 ----------
153 153 *objs : object
154 154 The Python objects to display, or if raw=True raw svg data to
155 155 display.
156 156 raw : bool
157 157 Are the data objects raw data or Python objects that need to be
158 158 formatted before display? [default: False]
159 159 metadata : dict (optional)
160 160 Metadata to be associated with the specific mimetype output.
161 161 """
162 162 _display_mimetype('image/svg+xml', objs, **kwargs)
163 163
164 164
165 165 def display_png(*objs, **kwargs):
166 166 """Display the PNG representation of an object.
167 167
168 168 Parameters
169 169 ----------
170 170 *objs : object
171 171 The Python objects to display, or if raw=True raw png data to
172 172 display.
173 173 raw : bool
174 174 Are the data objects raw data or Python objects that need to be
175 175 formatted before display? [default: False]
176 176 metadata : dict (optional)
177 177 Metadata to be associated with the specific mimetype output.
178 178 """
179 179 _display_mimetype('image/png', objs, **kwargs)
180 180
181 181
182 182 def display_jpeg(*objs, **kwargs):
183 183 """Display the JPEG representation of an object.
184 184
185 185 Parameters
186 186 ----------
187 187 *objs : object
188 188 The Python objects to display, or if raw=True raw JPEG data to
189 189 display.
190 190 raw : bool
191 191 Are the data objects raw data or Python objects that need to be
192 192 formatted before display? [default: False]
193 193 metadata : dict (optional)
194 194 Metadata to be associated with the specific mimetype output.
195 195 """
196 196 _display_mimetype('image/jpeg', objs, **kwargs)
197 197
198 198
199 199 def display_latex(*objs, **kwargs):
200 200 """Display the LaTeX representation of an object.
201 201
202 202 Parameters
203 203 ----------
204 204 *objs : object
205 205 The Python objects to display, or if raw=True raw latex data to
206 206 display.
207 207 raw : bool
208 208 Are the data objects raw data or Python objects that need to be
209 209 formatted before display? [default: False]
210 210 metadata : dict (optional)
211 211 Metadata to be associated with the specific mimetype output.
212 212 """
213 213 _display_mimetype('text/latex', objs, **kwargs)
214 214
215 215
216 216 def display_json(*objs, **kwargs):
217 217 """Display the JSON representation of an object.
218 218
219 219 Note that not many frontends support displaying JSON.
220 220
221 221 Parameters
222 222 ----------
223 223 *objs : object
224 224 The Python objects to display, or if raw=True raw json data to
225 225 display.
226 226 raw : bool
227 227 Are the data objects raw data or Python objects that need to be
228 228 formatted before display? [default: False]
229 229 metadata : dict (optional)
230 230 Metadata to be associated with the specific mimetype output.
231 231 """
232 232 _display_mimetype('application/json', objs, **kwargs)
233 233
234 234
235 235 def display_javascript(*objs, **kwargs):
236 236 """Display the Javascript representation of an object.
237 237
238 238 Parameters
239 239 ----------
240 240 *objs : object
241 241 The Python objects to display, or if raw=True raw javascript data to
242 242 display.
243 243 raw : bool
244 244 Are the data objects raw data or Python objects that need to be
245 245 formatted before display? [default: False]
246 246 metadata : dict (optional)
247 247 Metadata to be associated with the specific mimetype output.
248 248 """
249 249 _display_mimetype('application/javascript', objs, **kwargs)
250 250
251 251
252 252 def display_pdf(*objs, **kwargs):
253 253 """Display the PDF representation of an object.
254 254
255 255 Parameters
256 256 ----------
257 257 *objs : object
258 258 The Python objects to display, or if raw=True raw javascript data to
259 259 display.
260 260 raw : bool
261 261 Are the data objects raw data or Python objects that need to be
262 262 formatted before display? [default: False]
263 263 metadata : dict (optional)
264 264 Metadata to be associated with the specific mimetype output.
265 265 """
266 266 _display_mimetype('application/pdf', objs, **kwargs)
267 267
268 268
269 269 #-----------------------------------------------------------------------------
270 270 # Smart classes
271 271 #-----------------------------------------------------------------------------
272 272
273 273
274 274 class DisplayObject(object):
275 275 """An object that wraps data to be displayed."""
276 276
277 277 _read_flags = 'r'
278 278 _show_mem_addr = False
279 279 metadata = None
280 280
281 281 def __init__(self, data=None, url=None, filename=None, metadata=None):
282 282 """Create a display object given raw data.
283 283
284 284 When this object is returned by an expression or passed to the
285 285 display function, it will result in the data being displayed
286 286 in the frontend. The MIME type of the data should match the
287 287 subclasses used, so the Png subclass should be used for 'image/png'
288 288 data. If the data is a URL, the data will first be downloaded
289 289 and then displayed.
290 290
291 291 Parameters
292 292 ----------
293 293 data : unicode, str or bytes
294 294 The raw data or a URL or file to load the data from
295 295 url : unicode
296 296 A URL to download the data from.
297 297 filename : unicode
298 298 Path to a local file to load the data from.
299 299 metadata : dict
300 300 Dict of metadata to be associated with the object when displayed
301 301 """
302 302 if isinstance(data, (Path, PurePath)):
303 303 data = str(data)
304 304
305 305 if data is not None and isinstance(data, str):
306 306 if data.startswith('http') and url is None:
307 307 url = data
308 308 filename = None
309 309 data = None
310 310 elif _safe_exists(data) and filename is None:
311 311 url = None
312 312 filename = data
313 313 data = None
314 314
315 315 self.url = url
316 316 self.filename = filename
317 317 # because of @data.setter methods in
318 318 # subclasses ensure url and filename are set
319 319 # before assigning to self.data
320 320 self.data = data
321 321
322 322 if metadata is not None:
323 323 self.metadata = metadata
324 324 elif self.metadata is None:
325 325 self.metadata = {}
326 326
327 327 self.reload()
328 328 self._check_data()
329 329
330 330 def __repr__(self):
331 331 if not self._show_mem_addr:
332 332 cls = self.__class__
333 333 r = "<%s.%s object>" % (cls.__module__, cls.__name__)
334 334 else:
335 335 r = super(DisplayObject, self).__repr__()
336 336 return r
337 337
338 338 def _check_data(self):
339 339 """Override in subclasses if there's something to check."""
340 340 pass
341 341
342 342 def _data_and_metadata(self):
343 343 """shortcut for returning metadata with shape information, if defined"""
344 344 if self.metadata:
345 345 return self.data, deepcopy(self.metadata)
346 346 else:
347 347 return self.data
348 348
349 349 def reload(self):
350 350 """Reload the raw data from file or URL."""
351 351 if self.filename is not None:
352 352 with open(self.filename, self._read_flags) as f:
353 353 self.data = f.read()
354 354 elif self.url is not None:
355 355 # Deferred import
356 356 from urllib.request import urlopen
357 357 response = urlopen(self.url)
358 358 data = response.read()
359 359 # extract encoding from header, if there is one:
360 360 encoding = None
361 361 if 'content-type' in response.headers:
362 362 for sub in response.headers['content-type'].split(';'):
363 363 sub = sub.strip()
364 364 if sub.startswith('charset'):
365 365 encoding = sub.split('=')[-1].strip()
366 366 break
367 367 if 'content-encoding' in response.headers:
368 368 # TODO: do deflate?
369 369 if 'gzip' in response.headers['content-encoding']:
370 370 import gzip
371 371 from io import BytesIO
372 372 with gzip.open(BytesIO(data), 'rt', encoding=encoding) as fp:
373 373 encoding = None
374 374 data = fp.read()
375 375
376 376 # decode data, if an encoding was specified
377 377 # We only touch self.data once since
378 378 # subclasses such as SVG have @data.setter methods
379 379 # that transform self.data into ... well svg.
380 380 if encoding:
381 381 self.data = data.decode(encoding, 'replace')
382 382 else:
383 383 self.data = data
384 384
385 385
386 386 class TextDisplayObject(DisplayObject):
387 387 """Validate that display data is text"""
388 388 def _check_data(self):
389 389 if self.data is not None and not isinstance(self.data, str):
390 390 raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
391 391
392 392 class Pretty(TextDisplayObject):
393 393
394 394 def _repr_pretty_(self, pp, cycle):
395 395 return pp.text(self.data)
396 396
397 397
398 398 class HTML(TextDisplayObject):
399 399
400 400 def __init__(self, data=None, url=None, filename=None, metadata=None):
401 401 def warn():
402 402 if not data:
403 403 return False
404 404
405 405 #
406 406 # Avoid calling lower() on the entire data, because it could be a
407 407 # long string and we're only interested in its beginning and end.
408 408 #
409 409 prefix = data[:10].lower()
410 410 suffix = data[-10:].lower()
411 411 return prefix.startswith("<iframe ") and suffix.endswith("</iframe>")
412 412
413 413 if warn():
414 414 warnings.warn("Consider using IPython.display.IFrame instead")
415 415 super(HTML, self).__init__(data=data, url=url, filename=filename, metadata=metadata)
416 416
417 417 def _repr_html_(self):
418 418 return self._data_and_metadata()
419 419
420 420 def __html__(self):
421 421 """
422 422 This method exists to inform other HTML-using modules (e.g. Markupsafe,
423 423 htmltag, etc) that this object is HTML and does not need things like
424 424 special characters (<>&) escaped.
425 425 """
426 426 return self._repr_html_()
427 427
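A short usage sketch of this class and its ``__html__`` hook::

    from IPython.display import HTML

    h = HTML("<ul><li>alpha</li><li>beta</li></ul>")
    h              # in a notebook cell this renders as a bullet list
    h.__html__()   # HTML-aware libraries can consume the markup without escaping it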
428 428
429 429 class Markdown(TextDisplayObject):
430 430
431 431 def _repr_markdown_(self):
432 432 return self._data_and_metadata()
433 433
434 434
435 435 class Math(TextDisplayObject):
436 436
437 437 def _repr_latex_(self):
438 438 s = r"$\displaystyle %s$" % self.data.strip('$')
439 439 if self.metadata:
440 440 return s, deepcopy(self.metadata)
441 441 else:
442 442 return s
443 443
444 444
445 445 class Latex(TextDisplayObject):
446 446
447 447 def _repr_latex_(self):
448 448 return self._data_and_metadata()
449 449
450 450
451 451 class SVG(DisplayObject):
452 452 """Embed an SVG into the display.
453 453
454 454 Note: if you just want to view an SVG image via a URL, use :class:`Image` with
455 455 a ``url=URL`` keyword argument.
456 456 """
457 457
458 458 _read_flags = 'rb'
459 459 # wrap data in a property, which extracts the <svg> tag, discarding
460 460 # document headers
461 461 _data = None
462 462
463 463 @property
464 464 def data(self):
465 465 return self._data
466 466
467 467 @data.setter
468 468 def data(self, svg):
469 469 if svg is None:
470 470 self._data = None
471 471 return
472 472 # parse into dom object
473 473 from xml.dom import minidom
474 474 x = minidom.parseString(svg)
475 475 # get svg tag (should be 1)
476 476 found_svg = x.getElementsByTagName('svg')
477 477 if found_svg:
478 478 svg = found_svg[0].toxml()
479 479 else:
480 480 # fallback on the input, trust the user
481 481 # but this is probably an error.
482 482 pass
483 483 svg = cast_unicode(svg)
484 484 self._data = svg
485 485
486 486 def _repr_svg_(self):
487 487 return self._data_and_metadata()
488 488
489 489 class ProgressBar(DisplayObject):
490 490 """Progressbar supports displaying a progressbar like element
491 491 """
492 492 def __init__(self, total):
493 493 """Creates a new progressbar
494 494
495 495 Parameters
496 496 ----------
497 497 total : int
498 498 maximum size of the progressbar
499 499 """
500 500 self.total = total
501 501 self._progress = 0
502 502 self.html_width = '60ex'
503 503 self.text_width = 60
504 504 self._display_id = hexlify(os.urandom(8)).decode('ascii')
505 505
506 506 def __repr__(self):
507 507 fraction = self.progress / self.total
508 508 filled = '=' * int(fraction * self.text_width)
509 509 rest = ' ' * (self.text_width - len(filled))
510 510 return '[{}{}] {}/{}'.format(
511 511 filled, rest,
512 512 self.progress, self.total,
513 513 )
514 514
515 515 def _repr_html_(self):
516 516 return "<progress style='width:{}' max='{}' value='{}'></progress>".format(
517 517 self.html_width, self.total, self.progress)
518 518
519 519 def display(self):
520 520 display(self, display_id=self._display_id)
521 521
522 522 def update(self):
523 523 display(self, display_id=self._display_id, update=True)
524 524
525 525 @property
526 526 def progress(self):
527 527 return self._progress
528 528
529 529 @progress.setter
530 530 def progress(self, value):
531 531 self._progress = value
532 532 self.update()
533 533
534 534 def __iter__(self):
535 535 self.display()
536 536 self._progress = -1 # First iteration is 0
537 537 return self
538 538
539 539 def __next__(self):
540 540 """Returns current value and increments display by one."""
541 541 self.progress += 1
542 542 if self.progress < self.total:
543 543 return self.progress
544 544 else:
545 545 raise StopIteration()
546 546
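A minimal sketch of driving the bar through its iterator protocol in an IPython session::

    from IPython.display import ProgressBar

    bar = ProgressBar(10)      # total of 10 steps
    for step in bar:           # __iter__/__next__ update the same display each step
        pass                   # ... do one unit of work here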
547 547 class JSON(DisplayObject):
548 548 """JSON expects a JSON-able dict or list
549 549
550 550 not an already-serialized JSON string.
551 551
552 552 Scalar types (None, number, string) are not allowed, only dict or list containers.
553 553 """
554 554 # wrap data in a property, which warns about passing already-serialized JSON
555 555 _data = None
556 556 def __init__(self, data=None, url=None, filename=None, expanded=False, metadata=None, root='root', **kwargs):
557 557 """Create a JSON display object given raw data.
558 558
559 559 Parameters
560 560 ----------
561 561 data : dict or list
562 562 JSON data to display. Not an already-serialized JSON string.
563 563 Scalar types (None, number, string) are not allowed, only dict
564 564 or list containers.
565 565 url : unicode
566 566 A URL to download the data from.
567 567 filename : unicode
568 568 Path to a local file to load the data from.
569 569 expanded : boolean
570 570 Metadata to control whether a JSON display component is expanded.
571 571 metadata : dict
572 572 Specify extra metadata to attach to the json display object.
573 573 root : str
574 574 The name of the root element of the JSON tree
575 575 """
576 576 self.metadata = {
577 577 'expanded': expanded,
578 578 'root': root,
579 579 }
580 580 if metadata:
581 581 self.metadata.update(metadata)
582 582 if kwargs:
583 583 self.metadata.update(kwargs)
584 584 super(JSON, self).__init__(data=data, url=url, filename=filename)
585 585
586 586 def _check_data(self):
587 587 if self.data is not None and not isinstance(self.data, (dict, list)):
588 588 raise TypeError("%s expects JSONable dict or list, not %r" % (self.__class__.__name__, self.data))
589 589
590 590 @property
591 591 def data(self):
592 592 return self._data
593 593
594 594 @data.setter
595 595 def data(self, data):
596 596 if isinstance(data, (Path, PurePath)):
597 597 data = str(data)
598 598
599 599 if isinstance(data, str):
600 600 if self.filename is None and self.url is None:
601 601 warnings.warn("JSON expects JSONable dict or list, not JSON strings")
602 602 data = json.loads(data)
603 603 self._data = data
604 604
605 605 def _data_and_metadata(self):
606 606 return self.data, self.metadata
607 607
608 608 def _repr_json_(self):
609 609 return self._data_and_metadata()
610 610
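A short sketch of the intended call, passing a container rather than a pre-serialized string::

    from IPython.display import JSON

    # dict/list input, never a JSON string
    JSON({"name": "example", "values": [1, 2, 3]}, expanded=True, root="payload")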
611 611 _css_t = """var link = document.createElement("link");
612 612 link.ref = "stylesheet";
613 613 link.type = "text/css";
614 614 link.href = "%s";
615 615 document.head.appendChild(link);
616 616 """
617 617
618 618 _lib_t1 = """new Promise(function(resolve, reject) {
619 619 var script = document.createElement("script");
620 620 script.onload = resolve;
621 621 script.onerror = reject;
622 622 script.src = "%s";
623 623 document.head.appendChild(script);
624 624 }).then(() => {
625 625 """
626 626
627 627 _lib_t2 = """
628 628 });"""
629 629
630 630 class GeoJSON(JSON):
631 631 """GeoJSON expects JSON-able dict
632 632
633 633 not an already-serialized JSON string.
634 634
635 635 Scalar types (None, number, string) are not allowed, only dict containers.
636 636 """
637 637
638 638 def __init__(self, *args, **kwargs):
639 639 """Create a GeoJSON display object given raw data.
640 640
641 641 Parameters
642 642 ----------
643 643 data : dict or list
644 644 GeoJSON data. Not an already-serialized JSON string.
645 645 Scalar types (None, number, string) are not allowed, only dict
646 646 or list containers.
647 647 url_template : string
648 648 Leaflet TileLayer URL template: http://leafletjs.com/reference.html#url-template
649 649 layer_options : dict
650 650 Leaflet TileLayer options: http://leafletjs.com/reference.html#tilelayer-options
651 651 url : unicode
652 652 A URL to download the data from.
653 653 filename : unicode
654 654 Path to a local file to load the data from.
655 655 metadata : dict
656 656 Specify extra metadata to attach to the json display object.
657 657
658 658 Examples
659 659 --------
660 660 The following will display an interactive map of Mars with a point of
661 661 interest on frontends that support GeoJSON display.
662 662
663 663 >>> from IPython.display import GeoJSON
664 664
665 665 >>> GeoJSON(data={
666 666 ... "type": "Feature",
667 667 ... "geometry": {
668 668 ... "type": "Point",
669 669 ... "coordinates": [-81.327, 296.038]
670 670 ... }
671 671 ... },
672 672 ... url_template="http://s3-eu-west-1.amazonaws.com/whereonmars.cartodb.net/{basemap_id}/{z}/{x}/{y}.png",
673 673 ... layer_options={
674 674 ... "basemap_id": "celestia_mars-shaded-16k_global",
675 675 ... "attribution" : "Celestia/praesepe",
676 676 ... "minZoom" : 0,
677 677 ... "maxZoom" : 18,
678 678 ... })
679 679 <IPython.core.display.GeoJSON object>
680 680
681 681 In the terminal IPython, you will only see the text representation of
682 682 the GeoJSON object.
683 683
684 684 """
685 685
686 686 super(GeoJSON, self).__init__(*args, **kwargs)
687 687
688 688
689 689 def _ipython_display_(self):
690 690 bundle = {
691 691 'application/geo+json': self.data,
692 692 'text/plain': '<IPython.display.GeoJSON object>'
693 693 }
694 694 metadata = {
695 695 'application/geo+json': self.metadata
696 696 }
697 697 display(bundle, metadata=metadata, raw=True)
698 698
699 699 class Javascript(TextDisplayObject):
700 700
701 701 def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
702 702 """Create a Javascript display object given raw data.
703 703
704 704 When this object is returned by an expression or passed to the
705 705 display function, it will result in the data being displayed
706 706 in the frontend. If the data is a URL, the data will first be
707 707 downloaded and then displayed.
708 708
709 709 In the Notebook, the containing element will be available as `element`,
710 710 and jQuery will be available. Content appended to `element` will be
711 711 visible in the output area.
712 712
713 713 Parameters
714 714 ----------
715 715 data : unicode, str or bytes
716 716 The Javascript source code or a URL to download it from.
717 717 url : unicode
718 718 A URL to download the data from.
719 719 filename : unicode
720 720 Path to a local file to load the data from.
721 721 lib : list or str
722 722 A sequence of Javascript library URLs to load asynchronously before
723 723 running the source code. The full URLs of the libraries should
724 724 be given. A single Javascript library URL can also be given as a
725 725 string.
726 726 css : list or str
727 727 A sequence of css files to load before running the source code.
728 728 The full URLs of the css files should be given. A single css URL
729 729 can also be given as a string.
730 730 """
731 731 if isinstance(lib, str):
732 732 lib = [lib]
733 733 elif lib is None:
734 734 lib = []
735 735 if isinstance(css, str):
736 736 css = [css]
737 737 elif css is None:
738 738 css = []
739 739 if not isinstance(lib, (list,tuple)):
740 740 raise TypeError('expected sequence, got: %r' % lib)
741 741 if not isinstance(css, (list,tuple)):
742 742 raise TypeError('expected sequence, got: %r' % css)
743 743 self.lib = lib
744 744 self.css = css
745 745 super(Javascript, self).__init__(data=data, url=url, filename=filename)
746 746
747 747 def _repr_javascript_(self):
748 748 r = ''
749 749 for c in self.css:
750 750 r += _css_t % c
751 751 for l in self.lib:
752 752 r += _lib_t1 % l
753 753 r += self.data
754 754 r += _lib_t2*len(self.lib)
755 755 return r
756 756
757 757 # constants for identifying png/jpeg data
758 758 _PNG = b'\x89PNG\r\n\x1a\n'
759 759 _JPEG = b'\xff\xd8'
760 760
761 761 def _pngxy(data):
762 762 """read the (width, height) from a PNG header"""
763 763 ihdr = data.index(b'IHDR')
764 764 # next 8 bytes are width/height
765 765 return struct.unpack('>ii', data[ihdr+4:ihdr+12])
766 766
767 767 def _jpegxy(data):
768 768 """read the (width, height) from a JPEG header"""
769 769 # adapted from http://www.64lines.com/jpeg-width-height
770 770
771 771 idx = 4
772 772 while True:
773 773 block_size = struct.unpack('>H', data[idx:idx+2])[0]
774 774 idx = idx + block_size
775 775 if data[idx:idx+2] == b'\xFF\xC0':
776 776 # found Start of Frame
777 777 iSOF = idx
778 778 break
779 779 else:
780 780 # read another block
781 781 idx += 2
782 782
783 783 h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
784 784 return w, h
785 785
786 786 def _gifxy(data):
787 787 """read the (width, height) from a GIF header"""
788 788 return struct.unpack('<HH', data[6:10])
789 789
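To see what these helpers operate on, a synthetic header built from the PNG signature plus an IHDR chunk is enough (a sketch, not real image data)::

    import struct

    header = (b'\x89PNG\r\n\x1a\n'                # PNG signature (_PNG above)
              + struct.pack('>I', 13) + b'IHDR'   # IHDR chunk length + type
              + struct.pack('>ii', 640, 480))     # width, height fields
    assert _pngxy(header) == (640, 480)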
790 790
791 791 class Image(DisplayObject):
792 792
793 793 _read_flags = 'rb'
794 794 _FMT_JPEG = u'jpeg'
795 795 _FMT_PNG = u'png'
796 796 _FMT_GIF = u'gif'
797 797 _ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG, _FMT_GIF]
798 798 _MIMETYPES = {
799 799 _FMT_PNG: 'image/png',
800 800 _FMT_JPEG: 'image/jpeg',
801 801 _FMT_GIF: 'image/gif',
802 802 }
803 803
804 804 def __init__(
805 805 self,
806 806 data=None,
807 807 url=None,
808 808 filename=None,
809 809 format=None,
810 810 embed=None,
811 811 width=None,
812 812 height=None,
813 813 retina=False,
814 814 unconfined=False,
815 815 metadata=None,
816 816 alt=None,
817 817 ):
818 818 """Create a PNG/JPEG/GIF image object given raw data.
819 819
820 820 When this object is returned by an input cell or passed to the
821 821 display function, it will result in the image being displayed
822 822 in the frontend.
823 823
824 824 Parameters
825 825 ----------
826 826 data : unicode, str or bytes
827 827 The raw image data or a URL or filename to load the data from.
828 828 This always results in embedded image data.
829 829 url : unicode
830 830 A URL to download the data from. If you specify `url=`,
831 831 the image data will not be embedded unless you also specify `embed=True`.
832 832 filename : unicode
833 833 Path to a local file to load the data from.
834 834 Images from a file are always embedded.
835 835 format : unicode
836 836 The format of the image data (png/jpeg/jpg/gif). If a filename or URL is given,
837 837 the format will be inferred from the filename extension.
838 838 embed : bool
839 839 Should the image data be embedded using a data URI (True) or be
840 840 loaded using an <img> tag. Set this to True if you want the image
841 841 to be viewable later with no internet connection in the notebook.
842 842
843 843 Default is `True`, unless the keyword argument `url` is set, then
844 844 default value is `False`.
845 845
846 846 Note that QtConsole is not able to display images if `embed` is set to `False`
847 847 width : int
848 848 Width in pixels to which to constrain the image in html
849 849 height : int
850 850 Height in pixels to which to constrain the image in html
851 851 retina : bool
852 852 Automatically set the width and height to half of the measured
853 853 width and height.
854 854 This only works for embedded images because it reads the width/height
855 855 from image data.
856 856 For non-embedded images, you can just set the desired display width
857 857 and height directly.
858 858 unconfined : bool
859 859 Set unconfined=True to disable max-width confinement of the image.
860 860 metadata : dict
861 861 Specify extra metadata to attach to the image.
862 862 alt : unicode
863 863 Alternative text for the image, for use by screen readers.
864 864
865 865 Examples
866 866 --------
867 867 Embedded image data works in the qtconsole and notebook.
868 868 When passed positionally, the first arg can be any of raw image data,
869 869 a URL, or a filename from which to load image data.
870 870 The result always embeds the image data for inline images.
871 871
872 872 >>> Image('http://www.google.fr/images/srpr/logo3w.png')
873 873 <IPython.core.display.Image object>
874 874
875 875 >>> Image('/path/to/image.jpg')
876 876 <IPython.core.display.Image object>
877 877
878 878 >>> Image(b'RAW_PNG_DATA...')
879 879 <IPython.core.display.Image object>
880 880
881 881 Specifying Image(url=...) does not embed the image data,
882 882 it only generates ``<img>`` tag with a link to the source.
883 883 This will not work in the qtconsole or offline.
884 884
885 885 >>> Image(url='http://www.google.fr/images/srpr/logo3w.png')
886 886 <IPython.core.display.Image object>
887 887
888 888 """
889 889 if isinstance(data, (Path, PurePath)):
890 890 data = str(data)
891 891
892 892 if filename is not None:
893 893 ext = self._find_ext(filename)
894 894 elif url is not None:
895 895 ext = self._find_ext(url)
896 896 elif data is None:
897 897 raise ValueError("No image data found. Expecting filename, url, or data.")
898 898 elif isinstance(data, str) and (
899 899 data.startswith('http') or _safe_exists(data)
900 900 ):
901 901 ext = self._find_ext(data)
902 902 else:
903 903 ext = None
904 904
905 905 if format is None:
906 906 if ext is not None:
907 907 if ext == u'jpg' or ext == u'jpeg':
908 908 format = self._FMT_JPEG
909 909 elif ext == u'png':
910 910 format = self._FMT_PNG
911 911 elif ext == u'gif':
912 912 format = self._FMT_GIF
913 913 else:
914 914 format = ext.lower()
915 915 elif isinstance(data, bytes):
916 916 # infer image type from image data header,
917 917 # only if format has not been specified.
918 918 if data[:2] == _JPEG:
919 919 format = self._FMT_JPEG
920 920
921 921 # failed to detect format, default png
922 922 if format is None:
923 923 format = self._FMT_PNG
924 924
925 925 if format.lower() == 'jpg':
926 926 # jpg->jpeg
927 927 format = self._FMT_JPEG
928 928
929 929 self.format = format.lower()
930 930 self.embed = embed if embed is not None else (url is None)
931 931
932 932 if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
933 933 raise ValueError("Cannot embed the '%s' image format" % (self.format))
934 934 if self.embed:
935 935 self._mimetype = self._MIMETYPES.get(self.format)
936 936
937 937 self.width = width
938 938 self.height = height
939 939 self.retina = retina
940 940 self.unconfined = unconfined
941 941 self.alt = alt
942 942 super(Image, self).__init__(data=data, url=url, filename=filename,
943 943 metadata=metadata)
944 944
945 945 if self.width is None and self.metadata.get('width', {}):
946 946 self.width = metadata['width']
947 947
948 948 if self.height is None and self.metadata.get('height', {}):
949 949 self.height = metadata['height']
950 950
951 951 if self.alt is None and self.metadata.get("alt", {}):
952 952 self.alt = metadata["alt"]
953 953
954 954 if retina:
955 955 self._retina_shape()
956 956
957 957
958 958 def _retina_shape(self):
959 959 """load pixel-doubled width and height from image data"""
960 960 if not self.embed:
961 961 return
962 962 if self.format == self._FMT_PNG:
963 963 w, h = _pngxy(self.data)
964 964 elif self.format == self._FMT_JPEG:
965 965 w, h = _jpegxy(self.data)
966 966 elif self.format == self._FMT_GIF:
967 967 w, h = _gifxy(self.data)
968 968 else:
969 969 # retina only supports png
970 970 return
971 971 self.width = w // 2
972 972 self.height = h // 2
973 973
974 974 def reload(self):
975 975 """Reload the raw data from file or URL."""
976 976 if self.embed:
977 977 super(Image,self).reload()
978 978 if self.retina:
979 979 self._retina_shape()
980 980
981 981 def _repr_html_(self):
982 982 if not self.embed:
983 983 width = height = klass = alt = ""
984 984 if self.width:
985 985 width = ' width="%d"' % self.width
986 986 if self.height:
987 987 height = ' height="%d"' % self.height
988 988 if self.unconfined:
989 989 klass = ' class="unconfined"'
990 990 if self.alt:
991 991 alt = ' alt="%s"' % html.escape(self.alt)
992 992 return '<img src="{url}"{width}{height}{klass}{alt}/>'.format(
993 993 url=self.url,
994 994 width=width,
995 995 height=height,
996 996 klass=klass,
997 997 alt=alt,
998 998 )
999 999
1000 1000 def _repr_mimebundle_(self, include=None, exclude=None):
1001 1001 """Return the image as a mimebundle
1002 1002
1003 1003 Any new mimetype support should be implemented here.
1004 1004 """
1005 1005 if self.embed:
1006 1006 mimetype = self._mimetype
1007 1007 data, metadata = self._data_and_metadata(always_both=True)
1008 1008 if metadata:
1009 1009 metadata = {mimetype: metadata}
1010 1010 return {mimetype: data}, metadata
1011 1011 else:
1012 1012 return {'text/html': self._repr_html_()}
1013 1013
1014 1014 def _data_and_metadata(self, always_both=False):
1015 1015 """shortcut for returning metadata with shape information, if defined"""
1016 1016 try:
1017 1017 b64_data = b2a_base64(self.data).decode('ascii')
1018 1018 except TypeError as e:
1019 1019 raise FileNotFoundError(
1020 1020 "No such file or directory: '%s'" % (self.data)) from e
1021 1021 md = {}
1022 1022 if self.metadata:
1023 1023 md.update(self.metadata)
1024 1024 if self.width:
1025 1025 md['width'] = self.width
1026 1026 if self.height:
1027 1027 md['height'] = self.height
1028 1028 if self.unconfined:
1029 1029 md['unconfined'] = self.unconfined
1030 1030 if self.alt:
1031 1031 md["alt"] = self.alt
1032 1032 if md or always_both:
1033 1033 return b64_data, md
1034 1034 else:
1035 1035 return b64_data
1036 1036
1037 1037 def _repr_png_(self):
1038 1038 if self.embed and self.format == self._FMT_PNG:
1039 1039 return self._data_and_metadata()
1040 1040
1041 1041 def _repr_jpeg_(self):
1042 1042 if self.embed and self.format == self._FMT_JPEG:
1043 1043 return self._data_and_metadata()
1044 1044
1045 1045 def _find_ext(self, s):
1046 1046 base, ext = splitext(s)
1047 1047
1048 1048 if not ext:
1049 1049 return base
1050 1050
1051 1051 # `splitext` includes leading period, so we skip it
1052 1052 return ext[1:].lower()
1053 1053
1054 1054
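With the ``alt`` parameter added in this change, a local image can be embedded together with alternative text; a sketch with a placeholder path::

    from IPython.display import Image

    Image(filename="figures/training_curve.png",   # placeholder path
          width=400,
          alt="Training loss decreasing over 50 epochs")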
1055 1055 class Video(DisplayObject):
1056 1056
1057 1057 def __init__(self, data=None, url=None, filename=None, embed=False,
1058 1058 mimetype=None, width=None, height=None, html_attributes="controls"):
1059 1059 """Create a video object given raw data or an URL.
1060 1060
1061 1061 When this object is returned by an input cell or passed to the
1062 1062 display function, it will result in the video being displayed
1063 1063 in the frontend.
1064 1064
1065 1065 Parameters
1066 1066 ----------
1067 1067 data : unicode, str or bytes
1068 1068 The raw video data or a URL or filename to load the data from.
1069 1069 Raw data will require passing ``embed=True``.
1070 1070 url : unicode
1071 1071 A URL for the video. If you specify ``url=``,
1072 1072 the video data will not be embedded.
1073 1073 filename : unicode
1074 1074 Path to a local file containing the video.
1075 1075 Will be interpreted as a local URL unless ``embed=True``.
1076 1076 embed : bool
1077 1077 Should the video be embedded using a data URI (True) or be
1078 1078 loaded using a <video> tag (False).
1079 1079
1080 1080 Since videos are large, embedding them should be avoided, if possible.
1081 1081 You must confirm embedding as your intention by passing ``embed=True``.
1082 1082
1083 1083 Local files can be displayed with URLs without embedding the content, via::
1084 1084
1085 1085 Video('./video.mp4')
1086 1086 mimetype : unicode
1087 1087 Specify the mimetype for embedded videos.
1088 1088 Default will be guessed from file extension, if available.
1089 1089 width : int
1090 1090 Width in pixels to which to constrain the video in HTML.
1091 1091 If not supplied, defaults to the width of the video.
1092 1092 height : int
1093 1093 Height in pixels to which to constrain the video in html.
1094 1094 If not supplied, defaults to the height of the video.
1095 1095 html_attributes : str
1096 1096 Attributes for the HTML ``<video>`` block.
1097 1097 Default: ``"controls"`` to get video controls.
1098 1098 Other examples: ``"controls muted"`` for muted video with controls,
1099 1099 ``"loop autoplay"`` for looping autoplaying video without controls.
1100 1100
1101 1101 Examples
1102 1102 --------
1103 1103 ::
1104 1104
1105 1105 Video('https://archive.org/download/Sita_Sings_the_Blues/Sita_Sings_the_Blues_small.mp4')
1106 1106 Video('path/to/video.mp4')
1107 1107 Video('path/to/video.mp4', embed=True)
1108 1108 Video('path/to/video.mp4', embed=True, html_attributes="controls muted autoplay")
1109 1109 Video(b'raw-videodata', embed=True)
1110 1110 """
1111 1111 if isinstance(data, (Path, PurePath)):
1112 1112 data = str(data)
1113 1113
1114 1114 if url is None and isinstance(data, str) and data.startswith(('http:', 'https:')):
1115 1115 url = data
1116 1116 data = None
1117 1117 elif data is not None and os.path.exists(data):
1118 1118 filename = data
1119 1119 data = None
1120 1120
1121 1121 if data and not embed:
1122 1122 msg = ''.join([
1123 1123 "To embed videos, you must pass embed=True ",
1124 1124 "(this may make your notebook files huge)\n",
1125 1125 "Consider passing Video(url='...')",
1126 1126 ])
1127 1127 raise ValueError(msg)
1128 1128
1129 1129 self.mimetype = mimetype
1130 1130 self.embed = embed
1131 1131 self.width = width
1132 1132 self.height = height
1133 1133 self.html_attributes = html_attributes
1134 1134 super(Video, self).__init__(data=data, url=url, filename=filename)
1135 1135
1136 1136 def _repr_html_(self):
1137 1137 width = height = ''
1138 1138 if self.width:
1139 1139 width = ' width="%d"' % self.width
1140 1140 if self.height:
1141 1141 height = ' height="%d"' % self.height
1142 1142
1143 1143 # External URLs and potentially local files are not embedded into the
1144 1144 # notebook output.
1145 1145 if not self.embed:
1146 1146 url = self.url if self.url is not None else self.filename
1147 1147 output = """<video src="{0}" {1} {2} {3}>
1148 1148 Your browser does not support the <code>video</code> element.
1149 1149 </video>""".format(url, self.html_attributes, width, height)
1150 1150 return output
1151 1151
1152 1152 # Embedded videos are base64-encoded.
1153 1153 mimetype = self.mimetype
1154 1154 if self.filename is not None:
1155 1155 if not mimetype:
1156 1156 mimetype, _ = mimetypes.guess_type(self.filename)
1157 1157
1158 1158 with open(self.filename, 'rb') as f:
1159 1159 video = f.read()
1160 1160 else:
1161 1161 video = self.data
1162 1162 if isinstance(video, str):
1163 1163 # unicode input is already b64-encoded
1164 1164 b64_video = video
1165 1165 else:
1166 1166 b64_video = b2a_base64(video).decode('ascii').rstrip()
1167 1167
1168 1168 output = """<video {0} {1} {2}>
1169 1169 <source src="data:{3};base64,{4}" type="{3}">
1170 1170 Your browser does not support the video tag.
1171 1171 </video>""".format(self.html_attributes, width, height, mimetype, b64_video)
1172 1172 return output
1173 1173
1174 1174 def reload(self):
1175 1175 # TODO
1176 1176 pass
1177 1177
1178 1178
1179 1179 @skip_doctest
1180 1180 def set_matplotlib_formats(*formats, **kwargs):
1181 """Select figure formats for the inline backend. Optionally pass quality for JPEG.
1181 """
1182 .. deprecated:: 7.23
1183
1184 use `matplotlib_inline.backend_inline.set_matplotlib_formats()`
1185
1186 Select figure formats for the inline backend. Optionally pass quality for JPEG.
1182 1187
1183 1188 For example, this enables PNG and JPEG output with a JPEG quality of 90%::
1184 1189
1185 1190 In [1]: set_matplotlib_formats('png', 'jpeg', quality=90)
1186 1191
1187 1192 To set this in your config files use the following::
1188 1193
1189 1194 c.InlineBackend.figure_formats = {'png', 'jpeg'}
1190 1195 c.InlineBackend.print_figure_kwargs.update({'quality' : 90})
1191 1196
1192 1197 Parameters
1193 1198 ----------
1194 1199 *formats : strs
1195 1200 One or more figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
1196 1201 **kwargs
1197 1202 Keyword args will be relayed to ``figure.canvas.print_figure``.
1198 1203 """
1199 from IPython.core.interactiveshell import InteractiveShell
1200 from IPython.core.pylabtools import select_figure_formats
1201 # build kwargs, starting with InlineBackend config
1202 kw = {}
1203 from ipykernel.pylab.config import InlineBackend
1204 cfg = InlineBackend.instance()
1205 kw.update(cfg.print_figure_kwargs)
1206 kw.update(**kwargs)
1207 shell = InteractiveShell.instance()
1208 select_figure_formats(shell, formats, **kw)
1204 warnings.warn(
1205 "`set_matplotlib_formats` is deprecated since IPython 7.23, directly "
1206 "use `matplotlib_inline.backend_inline.set_matplotlib_formats()`",
1207 DeprecationWarning,
1208 stacklevel=2,
1209 )
1210
1211 from matplotlib_inline.backend_inline import (
1212 set_matplotlib_formats as set_matplotlib_formats_orig,
1213 )
1214
1215 set_matplotlib_formats_orig(*formats, **kwargs)
1209 1216
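The drop-in replacement the deprecation warning points to looks like this for the common case::

    # preferred since IPython 7.23 (requires the matplotlib-inline package)
    from matplotlib_inline.backend_inline import set_matplotlib_formats

    set_matplotlib_formats('png', 'retina')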
1210 1217 @skip_doctest
1211 1218 def set_matplotlib_close(close=True):
1212 """Set whether the inline backend closes all figures automatically or not.
1219 """
1220 .. deprecated:: 7.23
1221
1222 use `matplotlib_inline.backend_inline.set_matplotlib_close()`
1223
1224
1225 Set whether the inline backend closes all figures automatically or not.
1213 1226
1214 1227 By default, the inline backend used in the IPython Notebook will close all
1215 1228 matplotlib figures automatically after each cell is run. This means that
1216 1229 plots in different cells won't interfere. Sometimes, you may want to make
1217 1230 a plot in one cell and then refine it in later cells. This can be accomplished
1218 1231 by::
1219 1232
1220 1233 In [1]: set_matplotlib_close(False)
1221 1234
1222 1235 To set this in your config files use the following::
1223 1236
1224 1237 c.InlineBackend.close_figures = False
1225 1238
1226 1239 Parameters
1227 1240 ----------
1228 1241 close : bool
1229 1242 Should all matplotlib figures be automatically closed after each cell is
1230 1243 run?
1231 1244 """
1232 from ipykernel.pylab.config import InlineBackend
1233 cfg = InlineBackend.instance()
1234 cfg.close_figures = close
1245 warnings.warn(
1246 "`set_matplotlib_close` is deprecated since IPython 7.23, directly "
1247 "use `matplotlib_inline.backend_inline.set_matplotlib_close()`",
1248 DeprecationWarning,
1249 stacklevel=2,
1250 )
1251
1252 from matplotlib_inline.backend_inline import (
1253 set_matplotlib_close as set_matplotlib_close_orig,
1254 )
1255
1256 set_matplotlib_close_orig(close)
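
Both shims above keep the old IPython entry points working but only emit a DeprecationWarning and delegate. A minimal sketch of the migration they point at, assuming ``matplotlib_inline`` is installed (the new code path requires it)::

    # Old (deprecated since IPython 7.23):
    #   from IPython.display import set_matplotlib_formats, set_matplotlib_close
    # New, as the deprecation warnings above suggest:
    from matplotlib_inline.backend_inline import set_matplotlib_formats, set_matplotlib_close

    set_matplotlib_formats('png', 'jpeg')   # select inline figure formats
    set_matplotlib_close(False)             # keep figures open across cells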
@@ -1,367 +1,374 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Top-level display functions for displaying object in different formats."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7
8 8 from binascii import b2a_hex
9 9 import os
10 10 import sys
11 11
12 12 __all__ = ['display', 'clear_output', 'publish_display_data', 'update_display', 'DisplayHandle']
13 13
14 14 #-----------------------------------------------------------------------------
15 15 # utility functions
16 16 #-----------------------------------------------------------------------------
17 17
18 18
19 19 def _merge(d1, d2):
20 20 """Like update, but merges sub-dicts instead of clobbering at the top level.
21 21
22 22 Updates d1 in-place
23 23 """
24 24
25 25 if not isinstance(d2, dict) or not isinstance(d1, dict):
26 26 return d2
27 27 for key, value in d2.items():
28 28 d1[key] = _merge(d1.get(key), value)
29 29 return d1
30 30
31 31
32 32 #-----------------------------------------------------------------------------
33 33 # Main functions
34 34 #-----------------------------------------------------------------------------
35 35
36 36
37 37 # use * to indicate transient is keyword-only
38 38 def publish_display_data(data, metadata=None, source=None, *, transient=None, **kwargs):
39 39 """Publish data and metadata to all frontends.
40 40
41 41 See the ``display_data`` message in the messaging documentation for
42 42 more details about this message type.
43 43
44 44 Keys of data and metadata can be any mime-type.
45 45
46 46 Parameters
47 47 ----------
48 48 data : dict
49 49 A dictionary having keys that are valid MIME types (like
50 50 'text/plain' or 'image/svg+xml') and values that are the data for
51 51 that MIME type. The data itself must be a JSON'able data
52 52 structure. Minimally all data should have the 'text/plain' data,
53 53 which can be displayed by all frontends. If more than the plain
54 54 text is given, it is up to the frontend to decide which
55 55 representation to use.
56 56 metadata : dict
57 57 A dictionary for metadata related to the data. This can contain
58 58 arbitrary key, value pairs that frontends can use to interpret
59 59 the data. mime-type keys matching those in data can be used
60 60 to specify metadata about particular representations.
61 61 source : str, deprecated
62 62 Unused.
63 63 transient : dict, keyword-only
64 64 A dictionary of transient data, such as display_id.
65 """
65 """
66 66 from IPython.core.interactiveshell import InteractiveShell
67 67
68 68 display_pub = InteractiveShell.instance().display_pub
69 69
70 70 # only pass transient if supplied,
71 71 # to avoid errors with older ipykernel.
72 72 # TODO: We could check for ipykernel version and provide a detailed upgrade message.
73 73 if transient:
74 74 kwargs['transient'] = transient
75 75
76 76 display_pub.publish(
77 77 data=data,
78 78 metadata=metadata,
79 79 **kwargs
80 80 )
81 81
82 82
83 83 def _new_id():
84 84 """Generate a new random text id with urandom"""
85 85 return b2a_hex(os.urandom(16)).decode('ascii')
86 86
87 87
88 def display(*objs, include=None, exclude=None, metadata=None, transient=None, display_id=None, **kwargs):
88 def display(
89 *objs,
90 include=None,
91 exclude=None,
92 metadata=None,
93 transient=None,
94 display_id=None,
95 raw=False,
96 clear=False,
97 **kwargs
98 ):
89 99 """Display a Python object in all frontends.
90 100
91 101 By default all representations will be computed and sent to the frontends.
92 102 Frontends can decide which representation is used and how.
93 103
94 104 In terminal IPython this will be similar to using :func:`print`, for use in richer
95 105 frontends see Jupyter notebook examples with rich display logic.
96 106
97 107 Parameters
98 108 ----------
99 109 *objs : object
100 110 The Python objects to display.
101 111 raw : bool, optional
102 112 Are the objects to be displayed already mimetype-keyed dicts of raw display data,
103 113 or Python objects that need to be formatted before display? [default: False]
104 114 include : list, tuple or set, optional
105 115 A list of format type strings (MIME types) to include in the
106 116 format data dict. If this is set *only* the format types included
107 117 in this list will be computed.
108 118 exclude : list, tuple or set, optional
109 119 A list of format type strings (MIME types) to exclude in the format
110 120 data dict. If this is set all format types will be computed,
111 121 except for those included in this argument.
112 122 metadata : dict, optional
113 123 A dictionary of metadata to associate with the output.
114 124 mime-type keys in this dictionary will be associated with the individual
115 125 representation formats, if they exist.
116 126 transient : dict, optional
117 127 A dictionary of transient data to associate with the output.
118 128 Data in this dict should not be persisted to files (e.g. notebooks).
119 129 display_id : str or bool, optional
120 130 Set an id for the display.
121 131 This id can be used for updating this display area later via update_display.
122 132 If given as `True`, generate a new `display_id`
123 kwargs: additional keyword-args, optional
133 clear : bool, optional
134 Should the output area be cleared before displaying anything? If True,
135 this will wait for additional output before clearing. [default: False]
136 **kwargs : additional keyword-args, optional
124 137 Additional keyword-arguments are passed through to the display publisher.
125 138
126 139 Returns
127 140 -------
128
129 141 handle: DisplayHandle
130 142 Returns a handle on updatable displays for use with :func:`update_display`,
131 143 if `display_id` is given. Returns :any:`None` if no `display_id` is given
132 144 (default).
133 145
134 146 Examples
135 147 --------
136
137 148 >>> class Json(object):
138 149 ... def __init__(self, json):
139 150 ... self.json = json
140 151 ... def _repr_pretty_(self, pp, cycle):
141 152 ... import json
142 153 ... pp.text(json.dumps(self.json, indent=2))
143 154 ... def __repr__(self):
144 155 ... return str(self.json)
145 156 ...
146 157
147 158 >>> d = Json({1:2, 3: {4:5}})
148 159
149 160 >>> print(d)
150 161 {1: 2, 3: {4: 5}}
151 162
152 163 >>> display(d)
153 164 {
154 165 "1": 2,
155 166 "3": {
156 167 "4": 5
157 168 }
158 169 }
159 170
160 171 >>> def int_formatter(integer, pp, cycle):
161 172 ... pp.text('I'*integer)
162 173
163 174 >>> plain = get_ipython().display_formatter.formatters['text/plain']
164 175 >>> plain.for_type(int, int_formatter)
165 176 <function _repr_pprint at 0x...>
166 177 >>> display(7-5)
167 178 II
168 179
169 180 >>> del plain.type_printers[int]
170 181 >>> display(7-5)
171 182 2
172 183
173 184 See Also
174 185 --------
175
176 186 :func:`update_display`
177 187
178 188 Notes
179 189 -----
180
181 190 In Python, objects can declare their textual representation using the
182 191 `__repr__` method. IPython expands on this idea and allows objects to declare
183 192 other, rich representations including:
184 193
185 194 - HTML
186 195 - JSON
187 196 - PNG
188 197 - JPEG
189 198 - SVG
190 199 - LaTeX
191 200
192 201 A single object can declare some or all of these representations; all are
193 202 handled by IPython's display system.
194 203
195 204 The main idea of the first approach is that you have to implement special
196 205 display methods when you define your class, one for each representation you
197 206 want to use. Here is a list of the names of the special methods and the
198 207 values they must return:
199 208
200 209 - `_repr_html_`: return raw HTML as a string, or a tuple (see below).
201 210 - `_repr_json_`: return a JSONable dict, or a tuple (see below).
202 211 - `_repr_jpeg_`: return raw JPEG data, or a tuple (see below).
203 212 - `_repr_png_`: return raw PNG data, or a tuple (see below).
204 213 - `_repr_svg_`: return raw SVG data as a string, or a tuple (see below).
205 214 - `_repr_latex_`: return LaTeX commands in a string surrounded by "$",
206 215 or a tuple (see below).
207 216 - `_repr_mimebundle_`: return a full mimebundle containing the mapping
208 217 from all mimetypes to data.
209 218 Use this for any mime-type not listed above.
210 219
211 220 The above functions may also return the object's metadata alongside the
212 221 data. If the metadata is available, the functions will return a tuple
213 222 containing the data and metadata, in that order. If there is no metadata
214 223 available, then the functions will return the data only.
215 224
216 225 When you are directly writing your own classes, you can adapt them for
217 226 display in IPython by following the above approach. But in practice, you
218 227 often need to work with existing classes that you can't easily modify.
219 228
220 229 You can refer to the documentation on integrating with the display system in
221 230 order to register custom formatters for already existing types
222 231 (:ref:`integrating_rich_display`).
223 232
224 233 .. versionadded:: 5.4 display available without import
225 234 .. versionadded:: 6.1 display available without import
226 235
227 236 Since IPython 5.4 and 6.1 :func:`display` is automatically made available to
228 237 the user without import. If you are using display in a document that might
229 238 be used in a pure python context or with older version of IPython, use the
230 239 following import at the top of your file::
231 240
232 241 from IPython.display import display
233 242
234 243 """
235 244 from IPython.core.interactiveshell import InteractiveShell
236 245
237 246 if not InteractiveShell.initialized():
238 247 # Directly print objects.
239 248 print(*objs)
240 249 return
241 250
242 raw = kwargs.pop('raw', False)
243 251 if transient is None:
244 252 transient = {}
245 253 if metadata is None:
246 254 metadata={}
247 255 if display_id:
248 256 if display_id is True:
249 257 display_id = _new_id()
250 258 transient['display_id'] = display_id
251 259 if kwargs.get('update') and 'display_id' not in transient:
252 260 raise TypeError('display_id required for update_display')
253 261 if transient:
254 262 kwargs['transient'] = transient
255 263
256 264 if not objs and display_id:
257 265 # if given no objects, but still a request for a display_id,
258 266 # we assume the user wants to insert an empty output that
259 267 # can be updated later
260 268 objs = [{}]
261 269 raw = True
262 270
263 271 if not raw:
264 272 format = InteractiveShell.instance().display_formatter.format
265 273
274 if clear:
275 clear_output(wait=True)
276
266 277 for obj in objs:
267 278 if raw:
268 279 publish_display_data(data=obj, metadata=metadata, **kwargs)
269 280 else:
270 281 format_dict, md_dict = format(obj, include=include, exclude=exclude)
271 282 if not format_dict:
272 283 # nothing to display (e.g. _ipython_display_ took over)
273 284 continue
274 285 if metadata:
275 286 # kwarg-specified metadata gets precedence
276 287 _merge(md_dict, metadata)
277 288 publish_display_data(data=format_dict, metadata=md_dict, **kwargs)
278 289 if display_id:
279 290 return DisplayHandle(display_id)
280 291
281 292
282 293 # use * for keyword-only display_id arg
283 294 def update_display(obj, *, display_id, **kwargs):
284 295 """Update an existing display by id
285 296
286 297 Parameters
287 298 ----------
288
289 obj:
299 obj
290 300 The object with which to update the display
291 display_id: keyword-only
301 display_id : keyword-only
292 302 The id of the display to update
293 303
294 304 See Also
295 305 --------
296
297 306 :func:`display`
298 307 """
299 308 kwargs['update'] = True
300 309 display(obj, display_id=display_id, **kwargs)
301 310
302 311
303 312 class DisplayHandle(object):
304 313 """A handle on an updatable display
305 314
306 315 Call `.update(obj)` to display a new object.
307 316
308 317 Call `.display(obj)` to add a new instance of this display,
309 318 and update existing instances.
310 319
311 320 See Also
312 321 --------
313 322
314 323 :func:`display`, :func:`update_display`
315 324
316 325 """
317 326
318 327 def __init__(self, display_id=None):
319 328 if display_id is None:
320 329 display_id = _new_id()
321 330 self.display_id = display_id
322 331
323 332 def __repr__(self):
324 333 return "<%s display_id=%s>" % (self.__class__.__name__, self.display_id)
325 334
326 335 def display(self, obj, **kwargs):
327 336 """Make a new display with my id, updating existing instances.
328 337
329 338 Parameters
330 339 ----------
331
332 obj:
340 obj
333 341 object to display
334 **kwargs:
342 **kwargs
335 343 additional keyword arguments passed to display
336 344 """
337 345 display(obj, display_id=self.display_id, **kwargs)
338 346
339 347 def update(self, obj, **kwargs):
340 348 """Update existing displays with my id
341 349
342 350 Parameters
343 351 ----------
344
345 obj:
352 obj
346 353 object to display
347 **kwargs:
354 **kwargs
348 355 additional keyword arguments passed to update_display
349 356 """
350 357 update_display(obj, display_id=self.display_id, **kwargs)
351 358
352 359
353 360 def clear_output(wait=False):
354 361 """Clear the output of the current cell receiving output.
355 362
356 363 Parameters
357 364 ----------
358 365 wait : bool [default: false]
359 366 Wait to clear the output until new output is available to replace it."""
360 367 from IPython.core.interactiveshell import InteractiveShell
361 368 if InteractiveShell.initialized():
362 369 InteractiveShell.instance().display_pub.clear_output(wait)
363 370 else:
364 371 print('\033[2K\r', end='')
365 372 sys.stdout.flush()
366 373 print('\033[2K\r', end='')
367 374 sys.stderr.flush()
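
The pieces above (``display``, ``update_display``, ``DisplayHandle``, ``clear_output``) combine into updatable output areas, and the ``clear`` flag added in this diff clears the area before drawing. A usage sketch based on the docstrings, intended for a Jupyter frontend (in plain terminal IPython, ``display`` falls back to printing)::

    from IPython.display import display, update_display

    handle = display("working...", display_id=True)   # returns a DisplayHandle

    # Replace the earlier output in place, either via the handle...
    handle.update("half way")
    # ...or directly by id:
    update_display("done", display_id=handle.display_id)

    # Clear the output area first, waiting until new output arrives:
    display("fresh start", clear=True)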
@@ -1,325 +1,325 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Displayhook for IPython.
3 3
4 4 This defines a callable class that IPython uses for `sys.displayhook`.
5 5 """
6 6
7 7 # Copyright (c) IPython Development Team.
8 8 # Distributed under the terms of the Modified BSD License.
9 9
10 10 import builtins as builtin_mod
11 11 import sys
12 12 import io as _io
13 13 import tokenize
14 14
15 15 from traitlets.config.configurable import Configurable
16 16 from traitlets import Instance, Float
17 17 from warnings import warn
18 18
19 19 # TODO: Move the various attributes (cache_size, [others now moved]). Some
20 20 # of these are also attributes of InteractiveShell. They should be on ONE object
21 21 # only and the other objects should ask that one object for their values.
22 22
23 23 class DisplayHook(Configurable):
24 24 """The custom IPython displayhook to replace sys.displayhook.
25 25
26 26 This class does many things, but the basic idea is that it is a callable
27 27 that gets called anytime user code returns a value.
28 28 """
29 29
30 30 shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
31 31 allow_none=True)
32 32 exec_result = Instance('IPython.core.interactiveshell.ExecutionResult',
33 33 allow_none=True)
34 34 cull_fraction = Float(0.2)
35 35
36 36 def __init__(self, shell=None, cache_size=1000, **kwargs):
37 37 super(DisplayHook, self).__init__(shell=shell, **kwargs)
38 38 cache_size_min = 3
39 39 if cache_size <= 0:
40 40 self.do_full_cache = 0
41 41 cache_size = 0
42 42 elif cache_size < cache_size_min:
43 43 self.do_full_cache = 0
44 44 cache_size = 0
45 45 warn('caching was disabled (min value for cache size is %s).' %
46 46 cache_size_min,stacklevel=3)
47 47 else:
48 48 self.do_full_cache = 1
49 49
50 50 self.cache_size = cache_size
51 51
52 52 # we need a reference to the user-level namespace
53 53 self.shell = shell
54 54
55 55 self._,self.__,self.___ = '','',''
56 56
57 57 # these are deliberately global:
58 58 to_user_ns = {'_':self._,'__':self.__,'___':self.___}
59 59 self.shell.user_ns.update(to_user_ns)
60 60
61 61 @property
62 62 def prompt_count(self):
63 63 return self.shell.execution_count
64 64
65 65 #-------------------------------------------------------------------------
66 66 # Methods used in __call__. Override these methods to modify the behavior
67 67 # of the displayhook.
68 68 #-------------------------------------------------------------------------
69 69
70 70 def check_for_underscore(self):
71 71 """Check if the user has set the '_' variable by hand."""
72 72 # If something injected a '_' variable in __builtin__, delete
73 73 # ipython's automatic one so we don't clobber that. gettext() in
74 74 # particular uses _, so we need to stay away from it.
75 75 if '_' in builtin_mod.__dict__:
76 76 try:
77 77 user_value = self.shell.user_ns['_']
78 78 if user_value is not self._:
79 79 return
80 80 del self.shell.user_ns['_']
81 81 except KeyError:
82 82 pass
83 83
84 84 def quiet(self):
85 85 """Should we silence the display hook because of ';'?"""
86 86 # do not print output if input ends in ';'
87 87
88 88 try:
89 89 cell = self.shell.history_manager.input_hist_parsed[-1]
90 90 except IndexError:
91 91 # some uses of ipshellembed may fail here
92 92 return False
93 93
94 94 sio = _io.StringIO(cell)
95 95 tokens = list(tokenize.generate_tokens(sio.readline))
96 96
97 97 for token in reversed(tokens):
98 98 if token[0] in (tokenize.ENDMARKER, tokenize.NL, tokenize.NEWLINE, tokenize.COMMENT):
99 99 continue
100 100 if (token[0] == tokenize.OP) and (token[1] == ';'):
101 101 return True
102 102 else:
103 103 return False
104 104
105 105 def start_displayhook(self):
106 106 """Start the displayhook, initializing resources."""
107 107 pass
108 108
109 109 def write_output_prompt(self):
110 110 """Write the output prompt.
111 111
112 112 The default implementation simply writes the prompt to
113 113 ``sys.stdout``.
114 114 """
115 115 # Use write, not print which adds an extra space.
116 116 sys.stdout.write(self.shell.separate_out)
117 117 outprompt = 'Out[{}]: '.format(self.shell.execution_count)
118 118 if self.do_full_cache:
119 119 sys.stdout.write(outprompt)
120 120
121 121 def compute_format_data(self, result):
122 122 """Compute format data of the object to be displayed.
123 123
124 124 The format data is a generalization of the :func:`repr` of an object.
125 125 In the default implementation the format data is a :class:`dict` of
126 126 key/value pairs where the keys are valid MIME types and the values
127 127 are JSON'able data structures containing the raw data for that MIME
128 128 type. It is up to frontends to pick a MIME type to use and
129 129 display that data in an appropriate manner.
130 130
131 131 This method only computes the format data for the object and should
132 132 NOT actually print or write that to a stream.
133 133
134 134 Parameters
135 135 ----------
136 136 result : object
137 137 The Python object passed to the display hook, whose format will be
138 138 computed.
139 139
140 140 Returns
141 141 -------
142 142 (format_dict, md_dict) : dict
143 143 format_dict is a :class:`dict` whose keys are valid MIME types and values are
144 144 JSON'able raw data for that MIME type. It is recommended that
145 145 all return values of this should always include the "text/plain"
146 146 MIME type representation of the object.
147 147 md_dict is a :class:`dict` with the same MIME type keys
148 148 of metadata associated with each output.
149
149
150 150 """
151 151 return self.shell.display_formatter.format(result)
152 152
153 153 # This can be set to True by the write_output_prompt method in a subclass
154 154 prompt_end_newline = False
155 155
156 156 def write_format_data(self, format_dict, md_dict=None) -> None:
157 157 """Write the format data dict to the frontend.
158 158
159 159 This default version of this method simply writes the plain text
160 160 representation of the object to ``sys.stdout``. Subclasses should
161 161 override this method to send the entire `format_dict` to the
162 162 frontends.
163 163
164 164 Parameters
165 165 ----------
166 166 format_dict : dict
167 167 The format dict for the object passed to `sys.displayhook`.
168 168 md_dict : dict (optional)
169 169 The metadata dict to be associated with the display data.
170 170 """
171 171 if 'text/plain' not in format_dict:
172 172 # nothing to do
173 173 return
174 174 # We want to print because we want to always make sure we have a
175 175 # newline, even if all the prompt separators are ''. This is the
176 176 # standard IPython behavior.
177 177 result_repr = format_dict['text/plain']
178 178 if '\n' in result_repr:
179 179 # So that multi-line strings line up with the left column of
180 180 # the screen, instead of having the output prompt mess up
181 181 # their first line.
182 182 # We use the prompt template instead of the expanded prompt
183 183 # because the expansion may add ANSI escapes that will interfere
184 184 # with our ability to determine whether or not we should add
185 185 # a newline.
186 186 if not self.prompt_end_newline:
187 187 # But avoid extraneous empty lines.
188 188 result_repr = '\n' + result_repr
189 189
190 190 try:
191 191 print(result_repr)
192 192 except UnicodeEncodeError:
193 193 # If a character is not supported by the terminal encoding replace
194 194 # it with its \u or \x representation
195 195 print(result_repr.encode(sys.stdout.encoding,'backslashreplace').decode(sys.stdout.encoding))
196 196
197 197 def update_user_ns(self, result):
198 198 """Update user_ns with various things like _, __, _1, etc."""
199 199
200 200 # Avoid recursive reference when displaying _oh/Out
201 201 if self.cache_size and result is not self.shell.user_ns['_oh']:
202 202 if len(self.shell.user_ns['_oh']) >= self.cache_size and self.do_full_cache:
203 203 self.cull_cache()
204 204
205 205 # Don't overwrite '_' and friends if '_' is in __builtin__
206 206 # (otherwise we cause buggy behavior for things like gettext). and
207 207 # do not overwrite _, __ or ___ if one of these has been assigned
208 208 # by the user.
209 209 update_unders = True
210 210 for unders in ['_'*i for i in range(1,4)]:
211 211 if not unders in self.shell.user_ns:
212 212 continue
213 213 if getattr(self, unders) is not self.shell.user_ns.get(unders):
214 214 update_unders = False
215 215
216 216 self.___ = self.__
217 217 self.__ = self._
218 218 self._ = result
219 219
220 220 if ('_' not in builtin_mod.__dict__) and (update_unders):
221 221 self.shell.push({'_':self._,
222 222 '__':self.__,
223 223 '___':self.___}, interactive=False)
224 224
225 225 # hackish access to top-level namespace to create _1,_2... dynamically
226 226 to_main = {}
227 227 if self.do_full_cache:
228 228 new_result = '_%s' % self.prompt_count
229 229 to_main[new_result] = result
230 230 self.shell.push(to_main, interactive=False)
231 231 self.shell.user_ns['_oh'][self.prompt_count] = result
232 232
233 233 def fill_exec_result(self, result):
234 234 if self.exec_result is not None:
235 235 self.exec_result.result = result
236 236
237 237 def log_output(self, format_dict):
238 238 """Log the output."""
239 239 if 'text/plain' not in format_dict:
240 240 # nothing to do
241 241 return
242 242 if self.shell.logger.log_output:
243 243 self.shell.logger.log_write(format_dict['text/plain'], 'output')
244 244 self.shell.history_manager.output_hist_reprs[self.prompt_count] = \
245 245 format_dict['text/plain']
246 246
247 247 def finish_displayhook(self):
248 248 """Finish up all displayhook activities."""
249 249 sys.stdout.write(self.shell.separate_out2)
250 250 sys.stdout.flush()
251 251
252 252 def __call__(self, result=None):
253 253 """Printing with history cache management.
254 254
255 255 This is invoked every time the interpreter needs to print, and is
256 256 activated by setting the variable sys.displayhook to it.
257 257 """
258 258 self.check_for_underscore()
259 259 if result is not None and not self.quiet():
260 260 self.start_displayhook()
261 261 self.write_output_prompt()
262 262 format_dict, md_dict = self.compute_format_data(result)
263 263 self.update_user_ns(result)
264 264 self.fill_exec_result(result)
265 265 if format_dict:
266 266 self.write_format_data(format_dict, md_dict)
267 267 self.log_output(format_dict)
268 268 self.finish_displayhook()
269 269
270 270 def cull_cache(self):
271 271 """Output cache is full, cull the oldest entries"""
272 272 oh = self.shell.user_ns.get('_oh', {})
273 273 sz = len(oh)
274 274 cull_count = max(int(sz * self.cull_fraction), 2)
275 275 warn('Output cache limit (currently {sz} entries) hit.\n'
276 276 'Flushing oldest {cull_count} entries.'.format(sz=sz, cull_count=cull_count))
277 277
278 278 for i, n in enumerate(sorted(oh)):
279 279 if i >= cull_count:
280 280 break
281 281 self.shell.user_ns.pop('_%i' % n, None)
282 282 oh.pop(n, None)
283 283
284 284
285 285 def flush(self):
286 286 if not self.do_full_cache:
287 287 raise ValueError("You shouldn't have reached the cache flush "
288 288 "if full caching is not enabled!")
289 289 # delete auto-generated vars from global namespace
290 290
291 291 for n in range(1,self.prompt_count + 1):
292 292 key = '_'+repr(n)
293 293 try:
294 294 del self.shell.user_ns[key]
295 295 except: pass
296 296 # In some embedded circumstances, the user_ns doesn't have the
297 297 # '_oh' key set up.
298 298 oh = self.shell.user_ns.get('_oh', None)
299 299 if oh is not None:
300 300 oh.clear()
301 301
302 302 # Release our own references to objects:
303 303 self._, self.__, self.___ = '', '', ''
304 304
305 305 if '_' not in builtin_mod.__dict__:
306 306 self.shell.user_ns.update({'_':self._,'__':self.__,'___':self.___})
307 307 import gc
308 308 # TODO: Is this really needed?
309 309 # IronPython blocks here forever
310 310 if sys.platform != "cli":
311 311 gc.collect()
312 312
313 313
314 314 class CapturingDisplayHook(object):
315 315 def __init__(self, shell, outputs=None):
316 316 self.shell = shell
317 317 if outputs is None:
318 318 outputs = []
319 319 self.outputs = outputs
320 320
321 321 def __call__(self, result=None):
322 322 if result is None:
323 323 return
324 324 format_dict, md_dict = self.shell.display_formatter.format(result)
325 325 self.outputs.append({ 'data': format_dict, 'metadata': md_dict })
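
``DisplayHook.__call__`` is deliberately split into small steps so that subclasses can change one behaviour at a time, as the comments above note. A hypothetical subclass sketched from those methods (``PlainPromptHook`` and its prompt text are illustrative, not part of IPython; hooking it up is typically done via the ``InteractiveShell.displayhook_class`` configurable)::

    import sys
    from IPython.core.displayhook import DisplayHook

    class PlainPromptHook(DisplayHook):
        """Displayhook with a minimal output prompt."""

        def write_output_prompt(self):
            sys.stdout.write(self.shell.separate_out)
            if self.do_full_cache:
                sys.stdout.write('[%d]> ' % self.shell.execution_count)
                # The prompt does not end with a newline, so multi-line
                # reprs get a leading newline in write_format_data().
                self.prompt_end_newline = False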
@@ -1,138 +1,138 b''
1 1 """An interface for publishing rich data to frontends.
2 2
3 3 There are two components of the display system:
4 4
5 5 * Display formatters, which take a Python object and compute the
6 6 representation of the object in various formats (text, HTML, SVG, etc.).
7 7 * The display publisher that is used to send the representation data to the
8 8 various frontends.
9 9
10 10 This module defines the logic for display publishing. The display publisher uses
11 11 the ``display_data`` message type that is defined in the IPython messaging
12 12 spec.
13 13 """
14 14
15 15 # Copyright (c) IPython Development Team.
16 16 # Distributed under the terms of the Modified BSD License.
17 17
18 18
19 19 import sys
20 20
21 21 from traitlets.config.configurable import Configurable
22 22 from traitlets import List
23 23
24 24 # This used to be defined here - it is imported for backwards compatibility
25 25 from .display_functions import publish_display_data
26 26
27 27 #-----------------------------------------------------------------------------
28 28 # Main payload class
29 29 #-----------------------------------------------------------------------------
30 30
31 31
32 32 class DisplayPublisher(Configurable):
33 33 """A traited class that publishes display data to frontends.
34 34
35 35 Instances of this class are created by the main IPython object and should
36 36 be accessed there.
37 37 """
38 38
39 39 def __init__(self, shell=None, *args, **kwargs):
40 40 self.shell = shell
41 41 super().__init__(*args, **kwargs)
42 42
43 43 def _validate_data(self, data, metadata=None):
44 44 """Validate the display data.
45 45
46 46 Parameters
47 47 ----------
48 48 data : dict
49 49 The format data dictionary.
50 50 metadata : dict
51 51 Any metadata for the data.
52 52 """
53 53
54 54 if not isinstance(data, dict):
55 55 raise TypeError('data must be a dict, got: %r' % data)
56 56 if metadata is not None:
57 57 if not isinstance(metadata, dict):
58 58 raise TypeError('metadata must be a dict, got: %r' % metadata)
59 59
60 60 # use * to indicate transient, update are keyword-only
61 61 def publish(self, data, metadata=None, source=None, *, transient=None, update=False, **kwargs) -> None:
62 62 """Publish data and metadata to all frontends.
63 63
64 64 See the ``display_data`` message in the messaging documentation for
65 65 more details about this message type.
66 66
67 67 The following MIME types are currently implemented:
68 68
69 69 * text/plain
70 70 * text/html
71 71 * text/markdown
72 72 * text/latex
73 73 * application/json
74 74 * application/javascript
75 75 * image/png
76 76 * image/jpeg
77 77 * image/svg+xml
78 78
79 79 Parameters
80 80 ----------
81 81 data : dict
82 82 A dictionary having keys that are valid MIME types (like
83 83 'text/plain' or 'image/svg+xml') and values that are the data for
84 84 that MIME type. The data itself must be a JSON'able data
85 85 structure. Minimally all data should have the 'text/plain' data,
86 86 which can be displayed by all frontends. If more than the plain
87 87 text is given, it is up to the frontend to decide which
88 88 representation to use.
89 89 metadata : dict
90 90 A dictionary for metadata related to the data. This can contain
91 91 arbitrary key, value pairs that frontends can use to interpret
92 92 the data. Metadata specific to each mime-type can be specified
93 93 in the metadata dict with the same mime-type keys as
94 94 the data itself.
95 95 source : str, deprecated
96 96 Unused.
97 transient: dict, keyword-only
97 transient : dict, keyword-only
98 98 A dictionary for transient data.
99 99 Data in this dictionary should not be persisted as part of saving this output.
100 100 Examples include 'display_id'.
101 update: bool, keyword-only, default: False
101 update : bool, keyword-only, default: False
102 102 If True, only update existing outputs with the same display_id,
103 103 rather than creating a new output.
104 104 """
105 105
106 106 handlers = {}
107 107 if self.shell is not None:
108 108 handlers = getattr(self.shell, 'mime_renderers', {})
109 109
110 110 for mime, handler in handlers.items():
111 111 if mime in data:
112 112 handler(data[mime], metadata.get(mime, None))
113 113 return
114 114
115 115 if 'text/plain' in data:
116 116 print(data['text/plain'])
117 117
118 118 def clear_output(self, wait=False):
119 119 """Clear the output of the cell receiving output."""
120 120 print('\033[2K\r', end='')
121 121 sys.stdout.flush()
122 122 print('\033[2K\r', end='')
123 123 sys.stderr.flush()
124 124
125 125
126 126 class CapturingDisplayPublisher(DisplayPublisher):
127 127 """A DisplayPublisher that stores"""
128 128 outputs = List()
129 129
130 130 def publish(self, data, metadata=None, source=None, *, transient=None, update=False):
131 131 self.outputs.append({'data':data, 'metadata':metadata,
132 132 'transient':transient, 'update':update})
133 133
134 134 def clear_output(self, wait=False):
135 135 super(CapturingDisplayPublisher, self).clear_output(wait)
136 136
137 137 # empty the list, *do not* reassign a new list
138 138 self.outputs.clear()
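
``publish()`` accepts a mime-keyed data dict plus optional transient/update information; a sketch of the round trip through the module-level helper re-exported above (the ``'progress'`` display id is just an example value)::

    from IPython.display import publish_display_data

    publish_display_data(
        {'text/plain': 'step 1/3', 'text/html': '<b>step 1/3</b>'},
        transient={'display_id': 'progress'},
    )

    # Later: ask frontends to update the existing output for this
    # display_id instead of appending a new one.
    publish_display_data(
        {'text/plain': 'step 2/3'},
        transient={'display_id': 'progress'},
        update=True,
    )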
@@ -1,161 +1,161 b''
1 1 """Infrastructure for registering and firing callbacks on application events.
2 2
3 3 Unlike :mod:`IPython.core.hooks`, which lets end users set single functions to
4 4 be called at specific times, or a collection of alternative methods to try,
5 5 callbacks are designed to be used by extension authors. A number of callbacks
6 6 can be registered for the same event without needing to be aware of one another.
7 7
8 8 The functions defined in this module are no-ops indicating the names of available
9 9 events and the arguments which will be passed to them.
10 10
11 11 .. note::
12 12
13 13 This API is experimental in IPython 2.0, and may be revised in future versions.
14 14 """
15 15
16 16 from backcall import callback_prototype
17 17
18 18
19 19 class EventManager(object):
20 20 """Manage a collection of events and a sequence of callbacks for each.
21 21
22 22 This is attached to :class:`~IPython.core.interactiveshell.InteractiveShell`
23 23 instances as an ``events`` attribute.
24 24
25 25 .. note::
26 26
27 27 This API is experimental in IPython 2.0, and may be revised in future versions.
28 28 """
29 29 def __init__(self, shell, available_events):
30 30 """Initialise the :class:`CallbackManager`.
31
31
32 32 Parameters
33 33 ----------
34 34 shell
35 The :class:`~IPython.core.interactiveshell.InteractiveShell` instance
36 available_callbacks
37 An iterable of names for callback events.
35 The :class:`~IPython.core.interactiveshell.InteractiveShell` instance
36 available_events
37 An iterable of names for callback events.
38 38 """
39 39 self.shell = shell
40 40 self.callbacks = {n:[] for n in available_events}
41 41
42 42 def register(self, event, function):
43 43 """Register a new event callback.
44
44
45 45 Parameters
46 46 ----------
47 47 event : str
48 The event for which to register this callback.
48 The event for which to register this callback.
49 49 function : callable
50 A function to be called on the given event. It should take the same
51 parameters as the appropriate callback prototype.
52
50 A function to be called on the given event. It should take the same
51 parameters as the appropriate callback prototype.
52
53 53 Raises
54 54 ------
55 55 TypeError
56 If ``function`` is not callable.
56 If ``function`` is not callable.
57 57 KeyError
58 If ``event`` is not one of the known events.
58 If ``event`` is not one of the known events.
59 59 """
60 60 if not callable(function):
61 61 raise TypeError('Need a callable, got %r' % function)
62 62 callback_proto = available_events.get(event)
63 63 if function not in self.callbacks[event]:
64 64 self.callbacks[event].append(callback_proto.adapt(function))
65 65
66 66 def unregister(self, event, function):
67 67 """Remove a callback from the given event."""
68 68 if function in self.callbacks[event]:
69 69 return self.callbacks[event].remove(function)
70 70
71 71 # Remove callback in case ``function`` was adapted by `backcall`.
72 72 for callback in self.callbacks[event]:
73 73 try:
74 74 if callback.__wrapped__ is function:
75 75 return self.callbacks[event].remove(callback)
76 76 except AttributeError:
77 77 pass
78 78
79 79 raise ValueError('Function {!r} is not registered as a {} callback'.format(function, event))
80 80
81 81 def trigger(self, event, *args, **kwargs):
82 82 """Call callbacks for ``event``.
83
83
84 84 Any additional arguments are passed to all callbacks registered for this
85 85 event. Exceptions raised by callbacks are caught, and a message printed.
86 86 """
87 87 for func in self.callbacks[event][:]:
88 88 try:
89 89 func(*args, **kwargs)
90 90 except (Exception, KeyboardInterrupt):
91 91 print("Error in callback {} (for {}):".format(func, event))
92 92 self.shell.showtraceback()
93 93
94 94 # event_name -> prototype mapping
95 95 available_events = {}
96 96
97 97 def _define_event(callback_function):
98 98 callback_proto = callback_prototype(callback_function)
99 99 available_events[callback_function.__name__] = callback_proto
100 100 return callback_proto
101 101
102 102 # ------------------------------------------------------------------------------
103 103 # Callback prototypes
104 104 #
105 105 # No-op functions which describe the names of available events and the
106 106 # signatures of callbacks for those events.
107 107 # ------------------------------------------------------------------------------
108 108
109 109 @_define_event
110 110 def pre_execute():
111 111 """Fires before code is executed in response to user/frontend action.
112
112
113 113 This includes comm and widget messages and silent execution, as well as user
114 114 code cells.
115 115 """
116 116 pass
117 117
118 118 @_define_event
119 119 def pre_run_cell(info):
120 120 """Fires before user-entered code runs.
121 121
122 122 Parameters
123 123 ----------
124 124 info : :class:`~IPython.core.interactiveshell.ExecutionInfo`
125 An object containing information used for the code execution.
125 An object containing information used for the code execution.
126 126 """
127 127 pass
128 128
129 129 @_define_event
130 130 def post_execute():
131 131 """Fires after code is executed in response to user/frontend action.
132
132
133 133 This includes comm and widget messages and silent execution, as well as user
134 134 code cells.
135 135 """
136 136 pass
137 137
138 138 @_define_event
139 139 def post_run_cell(result):
140 140 """Fires after user-entered code runs.
141 141
142 142 Parameters
143 143 ----------
144 144 result : :class:`~IPython.core.interactiveshell.ExecutionResult`
145 The object which will be returned as the execution result.
145 The object which will be returned as the execution result.
146 146 """
147 147 pass
148 148
149 149 @_define_event
150 150 def shell_initialized(ip):
151 151 """Fires after initialisation of :class:`~IPython.core.interactiveshell.InteractiveShell`.
152
152
153 153 This is before extensions and startup scripts are loaded, so it can only be
154 154 set by subclassing.
155
155
156 156 Parameters
157 157 ----------
158 158 ip : :class:`~IPython.core.interactiveshell.InteractiveShell`
159 The newly initialised shell.
159 The newly initialised shell.
160 160 """
161 161 pass
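
The no-op functions above fix the callback names and signatures; actual callbacks are attached through the ``EventManager`` on the shell. A short sketch, assuming an interactive session where ``get_ipython()`` is available (``log_cell`` is illustrative)::

    ip = get_ipython()

    def log_cell(info):
        # `info` is the ExecutionInfo described for pre_run_cell
        print("about to run:", info.raw_cell[:40])

    ip.events.register('pre_run_cell', log_cell)
    # ... later, detach it again:
    ip.events.unregister('pre_run_cell', log_cell)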
@@ -1,150 +1,150 b''
1 1 # encoding: utf-8
2 2 """A class for managing IPython extensions."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7 import os
8 8 import os.path
9 9 import sys
10 10 from importlib import import_module, reload
11 11
12 12 from traitlets.config.configurable import Configurable
13 13 from IPython.utils.path import ensure_dir_exists, compress_user
14 14 from IPython.utils.decorators import undoc
15 15 from traitlets import Instance
16 16
17 17
18 18 #-----------------------------------------------------------------------------
19 19 # Main class
20 20 #-----------------------------------------------------------------------------
21 21
22 22 class ExtensionManager(Configurable):
23 23 """A class to manage IPython extensions.
24 24
25 25 An IPython extension is an importable Python module that has
26 26 a function with the signature::
27 27
28 28 def load_ipython_extension(ipython):
29 29 # Do things with ipython
30 30
31 31 This function is called after your extension is imported and the
32 32 currently active :class:`InteractiveShell` instance is passed as
33 33 the only argument. You can do anything you want with IPython at
34 34 that point, including defining new magic and aliases, adding new
35 35 components, etc.
36 36
37 37 You can also optionally define an :func:`unload_ipython_extension(ipython)`
38 38 function, which will be called if the user unloads or reloads the extension.
39 39 The extension manager will only call :func:`load_ipython_extension` again
40 40 if the extension is reloaded.
41 41
42 42 You can put your extension modules anywhere you want, as long as
43 43 they can be imported by Python's standard import mechanism. However,
44 44 to make it easy to write extensions, you can also put your extensions
45 45 in ``os.path.join(self.ipython_dir, 'extensions')``. This directory
46 46 is added to ``sys.path`` automatically.
47 47 """
48 48
49 49 shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
50 50
51 51 def __init__(self, shell=None, **kwargs):
52 52 super(ExtensionManager, self).__init__(shell=shell, **kwargs)
53 53 self.shell.observe(
54 54 self._on_ipython_dir_changed, names=('ipython_dir',)
55 55 )
56 56 self.loaded = set()
57 57
58 58 @property
59 59 def ipython_extension_dir(self):
60 60 return os.path.join(self.shell.ipython_dir, u'extensions')
61 61
62 62 def _on_ipython_dir_changed(self, change):
63 63 ensure_dir_exists(self.ipython_extension_dir)
64 64
65 65 def load_extension(self, module_str):
66 66 """Load an IPython extension by its module name.
67 67
68 68 Returns the string "already loaded" if the extension is already loaded,
69 69 "no load function" if the module doesn't have a load_ipython_extension
70 70 function, or None if it succeeded.
71 71 """
72 72 if module_str in self.loaded:
73 73 return "already loaded"
74 74
75 75 from IPython.utils.syspathcontext import prepended_to_syspath
76 76
77 77 with self.shell.builtin_trap:
78 78 if module_str not in sys.modules:
79 79 with prepended_to_syspath(self.ipython_extension_dir):
80 80 mod = import_module(module_str)
81 81 if mod.__file__.startswith(self.ipython_extension_dir):
82 82 print(("Loading extensions from {dir} is deprecated. "
83 83 "We recommend managing extensions like any "
84 84 "other Python packages, in site-packages.").format(
85 85 dir=compress_user(self.ipython_extension_dir)))
86 86 mod = sys.modules[module_str]
87 87 if self._call_load_ipython_extension(mod):
88 88 self.loaded.add(module_str)
89 89 else:
90 90 return "no load function"
91 91
92 92 def unload_extension(self, module_str):
93 93 """Unload an IPython extension by its module name.
94 94
95 95 This function looks up the extension's name in ``sys.modules`` and
96 96 simply calls ``mod.unload_ipython_extension(self)``.
97
97
98 98 Returns the string "no unload function" if the extension doesn't define
99 99 a function to unload itself, "not loaded" if the extension isn't loaded,
100 100 otherwise None.
101 101 """
102 102 if module_str not in self.loaded:
103 103 return "not loaded"
104 104
105 105 if module_str in sys.modules:
106 106 mod = sys.modules[module_str]
107 107 if self._call_unload_ipython_extension(mod):
108 108 self.loaded.discard(module_str)
109 109 else:
110 110 return "no unload function"
111 111
112 112 def reload_extension(self, module_str):
113 113 """Reload an IPython extension by calling reload.
114 114
115 115 If the module has not been loaded before,
116 116 :meth:`InteractiveShell.load_extension` is called. Otherwise
117 117 :func:`reload` is called and then the :func:`load_ipython_extension`
118 118 function of the module, if it exists is called.
119 119 """
120 120 from IPython.utils.syspathcontext import prepended_to_syspath
121 121
122 122 if (module_str in self.loaded) and (module_str in sys.modules):
123 123 self.unload_extension(module_str)
124 124 mod = sys.modules[module_str]
125 125 with prepended_to_syspath(self.ipython_extension_dir):
126 126 reload(mod)
127 127 if self._call_load_ipython_extension(mod):
128 128 self.loaded.add(module_str)
129 129 else:
130 130 self.load_extension(module_str)
131 131
132 132 def _call_load_ipython_extension(self, mod):
133 133 if hasattr(mod, 'load_ipython_extension'):
134 134 mod.load_ipython_extension(self.shell)
135 135 return True
136 136
137 137 def _call_unload_ipython_extension(self, mod):
138 138 if hasattr(mod, 'unload_ipython_extension'):
139 139 mod.unload_ipython_extension(self.shell)
140 140 return True
141 141
142 142 @undoc
143 143 def install_extension(self, url, filename=None):
144 144 """
145 145 Deprecated.
146 146 """
147 147 # Ensure the extension directory exists
148 148 raise DeprecationWarning(
149 149 '`install_extension` and the `install_ext` magic have been deprecated since IPython 4.0. '
150 150 'Use pip or other package managers to manage ipython extensions.')
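
The ``ExtensionManager`` docstring above spells out the whole contract; a minimal module satisfying it looks like the following (``my_extension`` and the pushed variable are illustrative). Once it is importable, it loads with ``%load_ext my_extension``::

    # my_extension.py
    def load_ipython_extension(ipython):
        # Called with the active InteractiveShell when the extension loads:
        # define variables, register magics, add formatters, etc.
        ipython.push({'answer': 42})

    def unload_ipython_extension(ipython):
        # Optional: called when the user unloads or reloads the extension.
        ipython.user_ns.pop('answer', None)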
@@ -1,1024 +1,1026 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Display formatters.
3 3
4 4 Inheritance diagram:
5 5
6 6 .. inheritance-diagram:: IPython.core.formatters
7 7 :parts: 3
8 8 """
9 9
10 10 # Copyright (c) IPython Development Team.
11 11 # Distributed under the terms of the Modified BSD License.
12 12
13 13 import abc
14 14 import json
15 15 import sys
16 16 import traceback
17 17 import warnings
18 18 from io import StringIO
19 19
20 20 from decorator import decorator
21 21
22 22 from traitlets.config.configurable import Configurable
23 23 from .getipython import get_ipython
24 24 from ..utils.sentinel import Sentinel
25 25 from ..utils.dir2 import get_real_method
26 26 from ..lib import pretty
27 27 from traitlets import (
28 28 Bool, Dict, Integer, Unicode, CUnicode, ObjectName, List,
29 29 ForwardDeclaredInstance,
30 30 default, observe,
31 31 )
32 32
33 33
34 34 class DisplayFormatter(Configurable):
35 35
36 36 active_types = List(Unicode(),
37 37 help="""List of currently active mime-types to display.
38 38 You can use this to set a white-list for formats to display.
39 39
40 40 Most users will not need to change this value.
41 41 """).tag(config=True)
42 42
43 43 @default('active_types')
44 44 def _active_types_default(self):
45 45 return self.format_types
46 46
47 47 @observe('active_types')
48 48 def _active_types_changed(self, change):
49 49 for key, formatter in self.formatters.items():
50 50 if key in change['new']:
51 51 formatter.enabled = True
52 52 else:
53 53 formatter.enabled = False
54 54
55 55 ipython_display_formatter = ForwardDeclaredInstance('FormatterABC')
56 56 @default('ipython_display_formatter')
57 57 def _default_formatter(self):
58 58 return IPythonDisplayFormatter(parent=self)
59 59
60 60 mimebundle_formatter = ForwardDeclaredInstance('FormatterABC')
61 61 @default('mimebundle_formatter')
62 62 def _default_mime_formatter(self):
63 63 return MimeBundleFormatter(parent=self)
64 64
65 65 # A dict of formatters whose keys are format types (MIME types) and whose
66 66 # values are subclasses of BaseFormatter.
67 67 formatters = Dict()
68 68 @default('formatters')
69 69 def _formatters_default(self):
70 70 """Activate the default formatters."""
71 71 formatter_classes = [
72 72 PlainTextFormatter,
73 73 HTMLFormatter,
74 74 MarkdownFormatter,
75 75 SVGFormatter,
76 76 PNGFormatter,
77 77 PDFFormatter,
78 78 JPEGFormatter,
79 79 LatexFormatter,
80 80 JSONFormatter,
81 81 JavascriptFormatter
82 82 ]
83 83 d = {}
84 84 for cls in formatter_classes:
85 85 f = cls(parent=self)
86 86 d[f.format_type] = f
87 87 return d
88 88
89 89 def format(self, obj, include=None, exclude=None):
90 90 """Return a format data dict for an object.
91 91
92 92 By default all format types will be computed.
93 93
94 94 The following MIME types are usually implemented:
95 95
96 96 * text/plain
97 97 * text/html
98 98 * text/markdown
99 99 * text/latex
100 100 * application/json
101 101 * application/javascript
102 102 * application/pdf
103 103 * image/png
104 104 * image/jpeg
105 105 * image/svg+xml
106 106
107 107 Parameters
108 108 ----------
109 109 obj : object
110 110 The Python object whose format data will be computed.
111 111 include : list, tuple or set; optional
112 112 A list of format type strings (MIME types) to include in the
113 113 format data dict. If this is set *only* the format types included
114 114 in this list will be computed.
115 115 exclude : list, tuple or set; optional
116 116 A list of format type string (MIME types) to exclude in the format
117 117 data dict. If this is set all format types will be computed,
118 118 except for those included in this argument.
119 119 Mimetypes present in exclude will take precedence over the ones in include
120 120
121 121 Returns
122 122 -------
123 123 (format_dict, metadata_dict) : tuple of two dicts
124
125 124 format_dict is a dictionary of key/value pairs, one of each format that was
126 125 generated for the object. The keys are the format types, which
127 126 will usually be MIME type strings and the values are JSON'able
128 127 data structures containing the raw data for the representation in
129 128 that format.
130
129
131 130 metadata_dict is a dictionary of metadata about each mime-type output.
132 131 Its keys will be a strict subset of the keys in format_dict.
133 132
134 133 Notes
135 134 -----
136
137 135 If an object implements `_repr_mimebundle_` as well as various
138 136 `_repr_*_`, the data returned by `_repr_mimebundle_` will take
139 137 precedence and the corresponding `_repr_*_` for this mimetype will
140 138 not be called.
141 139
142 140 """
143 141 format_dict = {}
144 142 md_dict = {}
145 143
146 144 if self.ipython_display_formatter(obj):
147 145 # object handled itself, don't proceed
148 146 return {}, {}
149 147
150 148 format_dict, md_dict = self.mimebundle_formatter(obj, include=include, exclude=exclude)
151 149
152 150 if format_dict or md_dict:
153 151 if include:
154 152 format_dict = {k:v for k,v in format_dict.items() if k in include}
155 153 md_dict = {k:v for k,v in md_dict.items() if k in include}
156 154 if exclude:
157 155 format_dict = {k:v for k,v in format_dict.items() if k not in exclude}
158 156 md_dict = {k:v for k,v in md_dict.items() if k not in exclude}
159 157
160 158 for format_type, formatter in self.formatters.items():
161 159 if format_type in format_dict:
162 160 # already got it from mimebundle, maybe don't render again.
163 161 # exception: manually registered per-mime renderer
164 162 # check priority:
165 163 # 1. user-registered per-mime formatter
166 164 # 2. mime-bundle (user-registered or repr method)
167 165 # 3. default per-mime formatter (e.g. repr method)
168 166 try:
169 167 formatter.lookup(obj)
170 168 except KeyError:
171 169 # no special formatter, use mime-bundle-provided value
172 170 continue
173 171 if include and format_type not in include:
174 172 continue
175 173 if exclude and format_type in exclude:
176 174 continue
177 175
178 176 md = None
179 177 try:
180 178 data = formatter(obj)
181 179 except:
182 180 # FIXME: log the exception
183 181 raise
184 182
185 183 # formatters can return raw data or (data, metadata)
186 184 if isinstance(data, tuple) and len(data) == 2:
187 185 data, md = data
188 186
189 187 if data is not None:
190 188 format_dict[format_type] = data
191 189 if md is not None:
192 190 md_dict[format_type] = md
193 191 return format_dict, md_dict
194 192
195 193 @property
196 194 def format_types(self):
197 195 """Return the format types (MIME types) of the active formatters."""
198 196 return list(self.formatters.keys())
199 197
200 198
201 199 #-----------------------------------------------------------------------------
202 200 # Formatters for specific format types (text, html, svg, etc.)
203 201 #-----------------------------------------------------------------------------
204 202
205 203
206 204 def _safe_repr(obj):
207 205 """Try to return a repr of an object
208 206
209 207 always returns a string, at least.
210 208 """
211 209 try:
212 210 return repr(obj)
213 211 except Exception as e:
214 212 return "un-repr-able object (%r)" % e
215 213
216 214
217 215 class FormatterWarning(UserWarning):
218 216 """Warning class for errors in formatters"""
219 217
220 218 @decorator
221 219 def catch_format_error(method, self, *args, **kwargs):
222 220 """show traceback on failed format call"""
223 221 try:
224 222 r = method(self, *args, **kwargs)
225 223 except NotImplementedError:
226 224 # don't warn on NotImplementedErrors
227 225 return self._check_return(None, args[0])
228 226 except Exception:
229 227 exc_info = sys.exc_info()
230 228 ip = get_ipython()
231 229 if ip is not None:
232 230 ip.showtraceback(exc_info)
233 231 else:
234 232 traceback.print_exception(*exc_info)
235 233 return self._check_return(None, args[0])
236 234 return self._check_return(r, args[0])
237 235
238 236
239 237 class FormatterABC(metaclass=abc.ABCMeta):
240 238 """ Abstract base class for Formatters.
241 239
242 240 A formatter is a callable class that is responsible for computing the
243 241 raw format data for a particular format type (MIME type). For example,
244 242 an HTML formatter would have a format type of `text/html` and would return
245 243 the HTML representation of the object when called.
246 244 """
247 245
248 246 # The format type of the data returned, usually a MIME type.
249 247 format_type = 'text/plain'
250 248
251 249 # Is the formatter enabled...
252 250 enabled = True
253 251
254 252 @abc.abstractmethod
255 253 def __call__(self, obj):
256 254 """Return a JSON'able representation of the object.
257 255
258 256 If the object cannot be formatted by this formatter,
259 257 warn and return None.
260 258 """
261 259 return repr(obj)
262 260
263 261
264 262 def _mod_name_key(typ):
265 263 """Return a (__module__, __name__) tuple for a type.
266
264
267 265 Used as key in Formatter.deferred_printers.
268 266 """
269 267 module = getattr(typ, '__module__', None)
270 268 name = getattr(typ, '__name__', None)
271 269 return (module, name)
272 270
273 271
274 272 def _get_type(obj):
275 273 """Return the type of an instance (old and new-style)"""
276 274 return getattr(obj, '__class__', None) or type(obj)
277 275
278 276
279 277 _raise_key_error = Sentinel('_raise_key_error', __name__,
280 278 """
281 279 Special value to raise a KeyError
282 280
283 281 Raise KeyError in `BaseFormatter.pop` if passed as the default value to `pop`
284 282 """)
285 283
286 284
287 285 class BaseFormatter(Configurable):
288 286 """A base formatter class that is configurable.
289 287
290 288 This formatter should usually be used as the base class of all formatters.
291 289 It is a traited :class:`Configurable` class and includes an extensible
292 290 API for users to determine how their objects are formatted. The following
293 291 logic is used to find a function to format a given object.
294 292
295 293 1. The object is introspected to see if it has a method with the name
296 294 :attr:`print_method`. If it does, that object is passed to that method
297 295 for formatting.
298 296 2. If no print method is found, three internal dictionaries are consulted
299 297 to find a print method: :attr:`singleton_printers`, :attr:`type_printers`
300 298 and :attr:`deferred_printers`.
301 299
302 300 Users should use these dictionaries to register functions that will be
303 301 used to compute the format data for their objects (if those objects don't
304 302 have the special print methods). The easiest way of using these
305 303 dictionaries is through the :meth:`for_type` and :meth:`for_type_by_name`
306 304 methods.
307 305
308 306 If no function/callable is found to compute the format data, ``None`` is
309 307 returned and this format type is not used.
310 308 """
311 309
312 310 format_type = Unicode('text/plain')
313 311 _return_type = str
314 312
315 313 enabled = Bool(True).tag(config=True)
316 314
317 315 print_method = ObjectName('__repr__')
318 316
319 317 # The singleton printers.
320 318 # Maps the IDs of the builtin singleton objects to the format functions.
321 319 singleton_printers = Dict().tag(config=True)
322 320
323 321 # The type-specific printers.
324 322 # Map type objects to the format functions.
325 323 type_printers = Dict().tag(config=True)
326 324
327 325 # The deferred-import type-specific printers.
328 326 # Map (modulename, classname) pairs to the format functions.
329 327 deferred_printers = Dict().tag(config=True)
330 328
331 329 @catch_format_error
332 330 def __call__(self, obj):
333 331 """Compute the format for an object."""
334 332 if self.enabled:
335 333 # lookup registered printer
336 334 try:
337 335 printer = self.lookup(obj)
338 336 except KeyError:
339 337 pass
340 338 else:
341 339 return printer(obj)
342 340 # Finally look for special method names
343 341 method = get_real_method(obj, self.print_method)
344 342 if method is not None:
345 343 return method()
346 344 return None
347 345 else:
348 346 return None
349 347
350 348 def __contains__(self, typ):
351 349 """map in to lookup_by_type"""
352 350 try:
353 351 self.lookup_by_type(typ)
354 352 except KeyError:
355 353 return False
356 354 else:
357 355 return True
358 356
359 357 def _check_return(self, r, obj):
360 358 """Check that a return value is appropriate
361
359
362 360 Return the value if so, None otherwise, warning if invalid.
363 361 """
364 362 if r is None or isinstance(r, self._return_type) or \
365 363 (isinstance(r, tuple) and r and isinstance(r[0], self._return_type)):
366 364 return r
367 365 else:
368 366 warnings.warn(
369 367 "%s formatter returned invalid type %s (expected %s) for object: %s" % \
370 368 (self.format_type, type(r), self._return_type, _safe_repr(obj)),
371 369 FormatterWarning
372 370 )
373 371
374 372 def lookup(self, obj):
375 373 """Look up the formatter for a given instance.
376
374
377 375 Parameters
378 376 ----------
379 obj : object instance
377 obj : object instance
380 378
381 379 Returns
382 380 -------
383 381 f : callable
384 382 The registered formatting callable for the type.
385 383
386 384 Raises
387 385 ------
388 386 KeyError if the type has not been registered.
389 387 """
390 388 # look for singleton first
391 389 obj_id = id(obj)
392 390 if obj_id in self.singleton_printers:
393 391 return self.singleton_printers[obj_id]
394 392 # then lookup by type
395 393 return self.lookup_by_type(_get_type(obj))
396 394
397 395 def lookup_by_type(self, typ):
398 396 """Look up the registered formatter for a type.
399 397
400 398 Parameters
401 399 ----------
402 typ : type or '__module__.__name__' string for a type
400 typ : type or '__module__.__name__' string for a type
403 401
404 402 Returns
405 403 -------
406 404 f : callable
407 405 The registered formatting callable for the type.
408 406
409 407 Raises
410 408 ------
411 409 KeyError if the type has not been registered.
412 410 """
413 411 if isinstance(typ, str):
414 412 typ_key = tuple(typ.rsplit('.',1))
415 413 if typ_key not in self.deferred_printers:
416 414 # We may have it cached in the type map. We will have to
417 415 # iterate over all of the types to check.
418 416 for cls in self.type_printers:
419 417 if _mod_name_key(cls) == typ_key:
420 418 return self.type_printers[cls]
421 419 else:
422 420 return self.deferred_printers[typ_key]
423 421 else:
424 422 for cls in pretty._get_mro(typ):
425 423 if cls in self.type_printers or self._in_deferred_types(cls):
426 424 return self.type_printers[cls]
427 425
428 426 # If we have reached here, the lookup failed.
429 427 raise KeyError("No registered printer for {0!r}".format(typ))
430 428
431 429 def for_type(self, typ, func=None):
432 430 """Add a format function for a given type.
433
431
434 432 Parameters
435 433 ----------
436 434 typ : type or '__module__.__name__' string for a type
437 435 The class of the object that will be formatted using `func`.
438 436 func : callable
439 437 A callable for computing the format data.
440 438 `func` will be called with the object to be formatted,
441 439 and will return the raw data in this formatter's format.
442 440 Subclasses may use a different call signature for the
443 441 `func` argument.
444
442
445 443 If `func` is None or not specified, there will be no change,
446 444 only returning the current value.
447
445
448 446 Returns
449 447 -------
450 448 oldfunc : callable
451 449 The currently registered callable.
452 450 If you are registering a new formatter,
453 451 this will be the previous value (to enable restoring later).
454 452 """
455 453 # if string given, interpret as 'pkg.module.class_name'
456 454 if isinstance(typ, str):
457 455 type_module, type_name = typ.rsplit('.', 1)
458 456 return self.for_type_by_name(type_module, type_name, func)
459 457
460 458 try:
461 459 oldfunc = self.lookup_by_type(typ)
462 460 except KeyError:
463 461 oldfunc = None
464 462
465 463 if func is not None:
466 464 self.type_printers[typ] = func
467 465
468 466 return oldfunc
469 467
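For illustration, a minimal sketch of ``for_type`` in use (assuming an interactive session where ``get_ipython()`` is available, and a hypothetical ``Money`` class); the callable follows the contract above, receiving the object and returning data in this formatter's format::

    class Money:
        def __init__(self, amount):
            self.amount = amount

    def money_html(obj):
        # hypothetical printer for the text/html formatter
        return "<b>$%.2f</b>" % obj.amount

    html_formatter = get_ipython().display_formatter.formatters['text/html']
    previous = html_formatter.for_type(Money, money_html)  # previously registered printer, if any
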
470 468 def for_type_by_name(self, type_module, type_name, func=None):
471 469 """Add a format function for a type specified by the full dotted
472 470 module and name of the type, rather than the type of the object.
473 471
474 472 Parameters
475 473 ----------
476 474 type_module : str
477 475 The full dotted name of the module the type is defined in, like
478 476 ``numpy``.
479 477 type_name : str
480 478 The name of the type (the class name), like ``dtype``
481 479 func : callable
482 480 A callable for computing the format data.
483 481 `func` will be called with the object to be formatted,
484 482 and will return the raw data in this formatter's format.
485 483 Subclasses may use a different call signature for the
486 484 `func` argument.
487
485
488 486 If `func` is None or unspecified, there will be no change,
489 487 only returning the current value.
490
488
491 489 Returns
492 490 -------
493 491 oldfunc : callable
494 492 The currently registered callable.
495 493 If you are registering a new formatter,
496 494 this will be the previous value (to enable restoring later).
497 495 """
498 496 key = (type_module, type_name)
499 497
500 498 try:
501 499 oldfunc = self.lookup_by_type("%s.%s" % key)
502 500 except KeyError:
503 501 oldfunc = None
504 502
505 503 if func is not None:
506 504 self.deferred_printers[key] = func
507 505 return oldfunc
508 506
509 507 def pop(self, typ, default=_raise_key_error):
510 508 """Pop a formatter for the given type.
511 509
512 510 Parameters
513 511 ----------
514 512 typ : type or '__module__.__name__' string for a type
515 513 default : object
516 514 value to be returned if no formatter is registered for typ.
517 515
518 516 Returns
519 517 -------
520 518 obj : object
521 519 The last registered object for the type.
522 520
523 521 Raises
524 522 ------
525 523 KeyError if the type is not registered and default is not specified.
526 524 """
527 525
528 526 if isinstance(typ, str):
529 527 typ_key = tuple(typ.rsplit('.',1))
530 528 if typ_key not in self.deferred_printers:
531 529 # We may have it cached in the type map. We will have to
532 530 # iterate over all of the types to check.
533 531 for cls in self.type_printers:
534 532 if _mod_name_key(cls) == typ_key:
535 533 old = self.type_printers.pop(cls)
536 534 break
537 535 else:
538 536 old = default
539 537 else:
540 538 old = self.deferred_printers.pop(typ_key)
541 539 else:
542 540 if typ in self.type_printers:
543 541 old = self.type_printers.pop(typ)
544 542 else:
545 543 old = self.deferred_printers.pop(_mod_name_key(typ), default)
546 544 if old is _raise_key_error:
547 545 raise KeyError("No registered value for {0!r}".format(typ))
548 546 return old
549 547
550 548 def _in_deferred_types(self, cls):
551 549 """
552 550 Check if the given class is specified in the deferred type registry.
553 551
554 552 Successful matches will be moved to the regular type registry for future use.
555 553 """
556 554 mod = getattr(cls, '__module__', None)
557 555 name = getattr(cls, '__name__', None)
558 556 key = (mod, name)
559 557 if key in self.deferred_printers:
560 558 # Move the printer over to the regular registry.
561 559 printer = self.deferred_printers.pop(key)
562 560 self.type_printers[cls] = printer
563 561 return True
564 562 return False
565 563
566 564
567 565 class PlainTextFormatter(BaseFormatter):
568 566 """The default pretty-printer.
569 567
570 568 This uses :mod:`IPython.lib.pretty` to compute the format data of
571 569 the object. If the object cannot be pretty printed, :func:`repr` is used.
572 570 See the documentation of :mod:`IPython.lib.pretty` for details on
573 571 how to write pretty printers. Here is a simple example::
574 572
575 573 def dtype_pprinter(obj, p, cycle):
576 574 if cycle:
577 575 return p.text('dtype(...)')
578 576 if hasattr(obj, 'fields'):
579 577 if obj.fields is None:
580 578 p.text(repr(obj))
581 579 else:
582 580 p.begin_group(7, 'dtype([')
583 581 for i, field in enumerate(obj.descr):
584 582 if i > 0:
585 583 p.text(',')
586 584 p.breakable()
587 585 p.pretty(field)
588 586 p.end_group(7, '])')
589 587 """
590 588
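The ``dtype_pprinter`` sketched above would typically be registered by dotted name, so that it only takes effect once numpy is actually imported; a hedged example, assuming an interactive session::

    plain = get_ipython().display_formatter.formatters['text/plain']
    # deferred registration: resolved when a numpy.dtype instance is first formatted
    plain.for_type_by_name('numpy', 'dtype', dtype_pprinter)
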
591 589 # The format type of data returned.
592 590 format_type = Unicode('text/plain')
593 591
594 592     # This subclass ignores this attribute as it always needs to return
595 593 # something.
596 594 enabled = Bool(True).tag(config=False)
597 595
598 596 max_seq_length = Integer(pretty.MAX_SEQ_LENGTH,
599 597 help="""Truncate large collections (lists, dicts, tuples, sets) to this size.
600 598
601 599 Set to 0 to disable truncation.
602 600 """
603 601 ).tag(config=True)
604 602
605 603     # Look for a _repr_pretty_ method to use for pretty printing.
606 604 print_method = ObjectName('_repr_pretty_')
607 605
608 606 # Whether to pretty-print or not.
609 607 pprint = Bool(True).tag(config=True)
610 608
611 609 # Whether to be verbose or not.
612 610 verbose = Bool(False).tag(config=True)
613 611
614 612 # The maximum width.
615 613 max_width = Integer(79).tag(config=True)
616 614
617 615 # The newline character.
618 616 newline = Unicode('\n').tag(config=True)
619 617
620 618 # format-string for pprinting floats
621 619 float_format = Unicode('%r')
622 620 # setter for float precision, either int or direct format-string
623 621 float_precision = CUnicode('').tag(config=True)
624 622
625 623 @observe('float_precision')
626 624 def _float_precision_changed(self, change):
627 625 """float_precision changed, set float_format accordingly.
628 626
629 627 float_precision can be set by int or str.
630 628 This will set float_format, after interpreting input.
631 629 If numpy has been imported, numpy print precision will also be set.
632 630
633 631         An integer `n` sets the format to '%.nf'; otherwise, the format string is used directly.
634 632
635 633 An empty string returns to defaults (repr for float, 8 for numpy).
636 634
637 635 This parameter can be set via the '%precision' magic.
638 636 """
639
640 637 new = change['new']
641 638 if '%' in new:
642 639 # got explicit format string
643 640 fmt = new
644 641 try:
645 642 fmt%3.14159
646 643 except Exception as e:
647 644 raise ValueError("Precision must be int or format string, not %r"%new) from e
648 645 elif new:
649 646 # otherwise, should be an int
650 647 try:
651 648 i = int(new)
652 649 assert i >= 0
653 650 except ValueError as e:
654 651 raise ValueError("Precision must be int or format string, not %r"%new) from e
655 652 except AssertionError as e:
656 653 raise ValueError("int precision must be non-negative, not %r"%i) from e
657 654
658 655 fmt = '%%.%if'%i
659 656 if 'numpy' in sys.modules:
660 657 # set numpy precision if it has been imported
661 658 import numpy
662 659 numpy.set_printoptions(precision=i)
663 660 else:
664 661 # default back to repr
665 662 fmt = '%r'
666 663 if 'numpy' in sys.modules:
667 664 import numpy
668 665 # numpy default is 8
669 666 numpy.set_printoptions(precision=8)
670 667 self.float_format = fmt
671 668
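The accepted ``float_precision`` values follow from the handler above; a brief hedged illustration (direct attribute access shown, though the ``%precision`` magic takes the same path)::

    plain = get_ipython().display_formatter.formatters['text/plain']
    plain.float_precision = 3        # int: float_format becomes '%.3f'
    plain.float_precision = '%.2e'   # explicit format string is used as-is
    plain.float_precision = ''       # empty: back to repr, numpy precision back to 8
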
672 669 # Use the default pretty printers from IPython.lib.pretty.
673 670 @default('singleton_printers')
674 671 def _singleton_printers_default(self):
675 672 return pretty._singleton_pprinters.copy()
676 673
677 674 @default('type_printers')
678 675 def _type_printers_default(self):
679 676 d = pretty._type_pprinters.copy()
680 677 d[float] = lambda obj,p,cycle: p.text(self.float_format%obj)
678 # if NumPy is used, set precision for its float64 type
679 if "numpy" in sys.modules:
680 import numpy
681
682 d[numpy.float64] = lambda obj, p, cycle: p.text(self.float_format % obj)
681 683 return d
682 684
683 685 @default('deferred_printers')
684 686 def _deferred_printers_default(self):
685 687 return pretty._deferred_type_pprinters.copy()
686 688
687 689 #### FormatterABC interface ####
688 690
689 691 @catch_format_error
690 692 def __call__(self, obj):
691 693 """Compute the pretty representation of the object."""
692 694 if not self.pprint:
693 695 return repr(obj)
694 696 else:
695 697 stream = StringIO()
696 698 printer = pretty.RepresentationPrinter(stream, self.verbose,
697 699 self.max_width, self.newline,
698 700 max_seq_length=self.max_seq_length,
699 701 singleton_pprinters=self.singleton_printers,
700 702 type_pprinters=self.type_printers,
701 703 deferred_pprinters=self.deferred_printers)
702 704 printer.pretty(obj)
703 705 printer.flush()
704 706 return stream.getvalue()
705 707
706 708
707 709 class HTMLFormatter(BaseFormatter):
708 710 """An HTML formatter.
709 711
710 712 To define the callables that compute the HTML representation of your
711 713 objects, define a :meth:`_repr_html_` method or use the :meth:`for_type`
712 714 or :meth:`for_type_by_name` methods to register functions that handle
713 715 this.
714 716
715 717 The return value of this formatter should be a valid HTML snippet that
716 718 could be injected into an existing DOM. It should *not* include the
717 719     ``<html>`` or ``<body>`` tags.
718 720 """
719 721 format_type = Unicode('text/html')
720 722
721 723 print_method = ObjectName('_repr_html_')
722 724
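The method-based route described above needs no registration; a minimal sketch with a hypothetical class::

    class ProgressBar:
        def __init__(self, fraction):
            self.fraction = fraction

        def _repr_html_(self):
            # picked up automatically by the HTML formatter
            return "<progress value='%d' max='100'></progress>" % int(self.fraction * 100)
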
723 725
724 726 class MarkdownFormatter(BaseFormatter):
725 727 """A Markdown formatter.
726 728
727 729 To define the callables that compute the Markdown representation of your
728 730 objects, define a :meth:`_repr_markdown_` method or use the :meth:`for_type`
729 731 or :meth:`for_type_by_name` methods to register functions that handle
730 732 this.
731 733
732 734     The return value of this formatter should be valid Markdown.
733 735 """
734 736 format_type = Unicode('text/markdown')
735 737
736 738 print_method = ObjectName('_repr_markdown_')
737 739
738 740 class SVGFormatter(BaseFormatter):
739 741 """An SVG formatter.
740 742
741 743 To define the callables that compute the SVG representation of your
742 744 objects, define a :meth:`_repr_svg_` method or use the :meth:`for_type`
743 745 or :meth:`for_type_by_name` methods to register functions that handle
744 746 this.
745 747
746 748 The return value of this formatter should be valid SVG enclosed in
747 749     ``<svg>`` tags that could be injected into an existing DOM. It should
748 750     *not* include the ``<html>`` or ``<body>`` tags.
749 751 """
750 752 format_type = Unicode('image/svg+xml')
751 753
752 754 print_method = ObjectName('_repr_svg_')
753 755
754 756
755 757 class PNGFormatter(BaseFormatter):
756 758 """A PNG formatter.
757 759
758 760 To define the callables that compute the PNG representation of your
759 761 objects, define a :meth:`_repr_png_` method or use the :meth:`for_type`
760 762 or :meth:`for_type_by_name` methods to register functions that handle
761 763 this.
762 764
763 765 The return value of this formatter should be raw PNG data, *not*
764 766 base64 encoded.
765 767 """
766 768 format_type = Unicode('image/png')
767 769
768 770 print_method = ObjectName('_repr_png_')
769 771
770 772 _return_type = (bytes, str)
771 773
772 774
773 775 class JPEGFormatter(BaseFormatter):
774 776 """A JPEG formatter.
775 777
776 778 To define the callables that compute the JPEG representation of your
777 779 objects, define a :meth:`_repr_jpeg_` method or use the :meth:`for_type`
778 780 or :meth:`for_type_by_name` methods to register functions that handle
779 781 this.
780 782
781 783 The return value of this formatter should be raw JPEG data, *not*
782 784 base64 encoded.
783 785 """
784 786 format_type = Unicode('image/jpeg')
785 787
786 788 print_method = ObjectName('_repr_jpeg_')
787 789
788 790 _return_type = (bytes, str)
789 791
790 792
791 793 class LatexFormatter(BaseFormatter):
792 794 """A LaTeX formatter.
793 795
794 796 To define the callables that compute the LaTeX representation of your
795 797 objects, define a :meth:`_repr_latex_` method or use the :meth:`for_type`
796 798 or :meth:`for_type_by_name` methods to register functions that handle
797 799 this.
798 800
799 801 The return value of this formatter should be a valid LaTeX equation,
800 802     enclosed in either ``$``, ``$$`` or another LaTeX equation
801 803 environment.
802 804 """
803 805 format_type = Unicode('text/latex')
804 806
805 807 print_method = ObjectName('_repr_latex_')
806 808
807 809
808 810 class JSONFormatter(BaseFormatter):
809 811 """A JSON string formatter.
810 812
811 813 To define the callables that compute the JSONable representation of
812 814 your objects, define a :meth:`_repr_json_` method or use the :meth:`for_type`
813 815 or :meth:`for_type_by_name` methods to register functions that handle
814 816 this.
815 817
816 818 The return value of this formatter should be a JSONable list or dict.
817 819 JSON scalars (None, number, string) are not allowed, only dict or list containers.
818 820 """
819 821 format_type = Unicode('application/json')
820 822 _return_type = (list, dict)
821 823
822 824 print_method = ObjectName('_repr_json_')
823 825
824 826 def _check_return(self, r, obj):
825 827 """Check that a return value is appropriate
826
828
827 829 Return the value if so, None otherwise, warning if invalid.
828 830 """
829 831 if r is None:
830 832 return
831 833 md = None
832 834 if isinstance(r, tuple):
833 835 # unpack data, metadata tuple for type checking on first element
834 836 r, md = r
835 837
836 838 # handle deprecated JSON-as-string form from IPython < 3
837 839 if isinstance(r, str):
838 840 warnings.warn("JSON expects JSONable list/dict containers, not JSON strings",
839 841 FormatterWarning)
840 842 r = json.loads(r)
841 843
842 844 if md is not None:
843 845 # put the tuple back together
844 846 r = (r, md)
845 847 return super(JSONFormatter, self)._check_return(r, obj)
846 848
847 849
848 850 class JavascriptFormatter(BaseFormatter):
849 851 """A Javascript formatter.
850 852
851 853 To define the callables that compute the Javascript representation of
852 854 your objects, define a :meth:`_repr_javascript_` method or use the
853 855 :meth:`for_type` or :meth:`for_type_by_name` methods to register functions
854 856 that handle this.
855 857
856 858 The return value of this formatter should be valid Javascript code and
857 859     should *not* be enclosed in ``<script>`` tags.
858 860 """
859 861 format_type = Unicode('application/javascript')
860 862
861 863 print_method = ObjectName('_repr_javascript_')
862 864
863 865
864 866 class PDFFormatter(BaseFormatter):
865 867 """A PDF formatter.
866 868
867 869 To define the callables that compute the PDF representation of your
868 870 objects, define a :meth:`_repr_pdf_` method or use the :meth:`for_type`
869 871 or :meth:`for_type_by_name` methods to register functions that handle
870 872 this.
871 873
872 874 The return value of this formatter should be raw PDF data, *not*
873 875 base64 encoded.
874 876 """
875 877 format_type = Unicode('application/pdf')
876 878
877 879 print_method = ObjectName('_repr_pdf_')
878 880
879 881 _return_type = (bytes, str)
880 882
881 883 class IPythonDisplayFormatter(BaseFormatter):
882 884 """An escape-hatch Formatter for objects that know how to display themselves.
883 885
884 886 To define the callables that compute the representation of your
885 887 objects, define a :meth:`_ipython_display_` method or use the :meth:`for_type`
886 888 or :meth:`for_type_by_name` methods to register functions that handle
887 889 this. Unlike mime-type displays, this method should not return anything,
888 890 instead calling any appropriate display methods itself.
889 891
890 892     This display formatter has the highest priority.
891 893 If it fires, no other display formatter will be called.
892 894
893 895 Prior to IPython 6.1, `_ipython_display_` was the only way to display custom mime-types
894 896 without registering a new Formatter.
895 897
896 898 IPython 6.1 introduces `_repr_mimebundle_` for displaying custom mime-types,
897 899 so `_ipython_display_` should only be used for objects that require unusual
898 900 display patterns, such as multiple display calls.
899 901 """
900 902 print_method = ObjectName('_ipython_display_')
901 903 _return_type = (type(None), bool)
902 904
903 905 @catch_format_error
904 906 def __call__(self, obj):
905 907 """Compute the format for an object."""
906 908 if self.enabled:
907 909 # lookup registered printer
908 910 try:
909 911 printer = self.lookup(obj)
910 912 except KeyError:
911 913 pass
912 914 else:
913 915 printer(obj)
914 916 return True
915 917 # Finally look for special method names
916 918 method = get_real_method(obj, self.print_method)
917 919 if method is not None:
918 920 method()
919 921 return True
920 922
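Because this formatter expects the object to perform its own display calls rather than return data, a hedged sketch of a hypothetical object using it might look like::

    from IPython.display import display, HTML

    class Report:
        def _ipython_display_(self):
            # several display calls, nothing returned
            display(HTML("<h3>Quarterly report</h3>"))
            display(HTML("<p>All figures are provisional.</p>"))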
921 923
922 924 class MimeBundleFormatter(BaseFormatter):
923 925 """A Formatter for arbitrary mime-types.
924 926
925 927 Unlike other `_repr_<mimetype>_` methods,
926 928 `_repr_mimebundle_` should return mime-bundle data,
927 929 either the mime-keyed `data` dictionary or the tuple `(data, metadata)`.
928 930 Any mime-type is valid.
929 931
930 932 To define the callables that compute the mime-bundle representation of your
931 933 objects, define a :meth:`_repr_mimebundle_` method or use the :meth:`for_type`
932 934 or :meth:`for_type_by_name` methods to register functions that handle
933 935 this.
934 936
935 937 .. versionadded:: 6.1
936 938 """
937 939 print_method = ObjectName('_repr_mimebundle_')
938 940 _return_type = dict
939 941
940 942 def _check_return(self, r, obj):
941 943 r = super(MimeBundleFormatter, self)._check_return(r, obj)
942 944 # always return (data, metadata):
943 945 if r is None:
944 946 return {}, {}
945 947 if not isinstance(r, tuple):
946 948 return r, {}
947 949 return r
948 950
949 951 @catch_format_error
950 952 def __call__(self, obj, include=None, exclude=None):
951 953 """Compute the format for an object.
952 954
953 955         Identical to the parent's method, but extra parameters are passed through to the print method.
954 956 
955 957         Unlike other ``_repr_*_`` methods, `_repr_mimebundle_` should allow extra kwargs, in
956 958 particular `include` and `exclude`.
957 959 """
958 960 if self.enabled:
959 961 # lookup registered printer
960 962 try:
961 963 printer = self.lookup(obj)
962 964 except KeyError:
963 965 pass
964 966 else:
965 967 return printer(obj)
966 968 # Finally look for special method names
967 969 method = get_real_method(obj, self.print_method)
968 970
969 971 if method is not None:
970 972 return method(include=include, exclude=exclude)
971 973 return None
972 974 else:
973 975 return None
974 976
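A hedged sketch of the mime-bundle protocol described above, for a hypothetical ``Vector`` class returning the ``(data, metadata)`` form::

    class Vector:
        def __init__(self, x, y):
            self.x, self.y = x, y

        def _repr_mimebundle_(self, include=None, exclude=None):
            data = {
                "text/plain": "Vector(%r, %r)" % (self.x, self.y),
                "text/latex": r"$\begin{pmatrix}%s \\ %s\end{pmatrix}$" % (self.x, self.y),
            }
            return data, {}  # the metadata dict may be empty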
975 977
976 978 FormatterABC.register(BaseFormatter)
977 979 FormatterABC.register(PlainTextFormatter)
978 980 FormatterABC.register(HTMLFormatter)
979 981 FormatterABC.register(MarkdownFormatter)
980 982 FormatterABC.register(SVGFormatter)
981 983 FormatterABC.register(PNGFormatter)
982 984 FormatterABC.register(PDFFormatter)
983 985 FormatterABC.register(JPEGFormatter)
984 986 FormatterABC.register(LatexFormatter)
985 987 FormatterABC.register(JSONFormatter)
986 988 FormatterABC.register(JavascriptFormatter)
987 989 FormatterABC.register(IPythonDisplayFormatter)
988 990 FormatterABC.register(MimeBundleFormatter)
989 991
990 992
991 993 def format_display_data(obj, include=None, exclude=None):
992 994 """Return a format data dict for an object.
993 995
994 996 By default all format types will be computed.
995 997
996 998 Parameters
997 999 ----------
998 1000 obj : object
999 1001 The Python object whose format data will be computed.
1000 1002
1001 1003 Returns
1002 1004 -------
1003 1005 format_dict : dict
1004 1006         A dictionary of key/value pairs, one for each format that was
1005 1007         generated for the object. The keys are the format types, which
1006 1008         will usually be MIME type strings, and the values are JSON'able
1007 1009         data structures containing the raw data for the representation in
1008 1010         that format.
1009 1011 include : list or tuple, optional
1010 1012 A list of format type strings (MIME types) to include in the
1011 1013 format data dict. If this is set *only* the format types included
1012 1014 in this list will be computed.
1013 1015 exclude : list or tuple, optional
1014 1016 A list of format type string (MIME types) to exclude in the format
1015 1017 data dict. If this is set all format types will be computed,
1016 1018 except for those included in this argument.
1017 1019 """
1018 1020 from .interactiveshell import InteractiveShell
1019 1021
1020 1022 return InteractiveShell.instance().display_formatter.format(
1021 1023 obj,
1022 1024 include,
1023 1025 exclude
1024 1026 )
@@ -1,897 +1,907 b''
1 1 """ History related magics and functionality """
2 2
3 3 # Copyright (c) IPython Development Team.
4 4 # Distributed under the terms of the Modified BSD License.
5 5
6 6
7 7 import atexit
8 8 import datetime
9 9 from pathlib import Path
10 10 import re
11 11 import sqlite3
12 12 import threading
13 13
14 14 from traitlets.config.configurable import LoggingConfigurable
15 15 from decorator import decorator
16 16 from IPython.utils.decorators import undoc
17 17 from IPython.paths import locate_profile
18 18 from traitlets import (
19 19 Any,
20 20 Bool,
21 21 Dict,
22 22 Instance,
23 23 Integer,
24 24 List,
25 25 Unicode,
26 26 Union,
27 27 TraitError,
28 28 default,
29 29 observe,
30 30 )
31 31
32 32 #-----------------------------------------------------------------------------
33 33 # Classes and functions
34 34 #-----------------------------------------------------------------------------
35 35
36 36 @undoc
37 37 class DummyDB(object):
38 38 """Dummy DB that will act as a black hole for history.
39 39
40 40     Only used in the absence of sqlite."""
41 41 def execute(*args, **kwargs):
42 42 return []
43 43
44 44 def commit(self, *args, **kwargs):
45 45 pass
46 46
47 47 def __enter__(self, *args, **kwargs):
48 48 pass
49 49
50 50 def __exit__(self, *args, **kwargs):
51 51 pass
52 52
53 53
54 54 @decorator
55 55 def only_when_enabled(f, self, *a, **kw):
56 56 """Decorator: return an empty list in the absence of sqlite."""
57 57 if not self.enabled:
58 58 return []
59 59 else:
60 60 return f(self, *a, **kw)
61 61
62 62
63 63 # use 16kB as threshold for whether a corrupt history db should be saved
64 64 # that should be at least 100 entries or so
65 65 _SAVE_DB_SIZE = 16384
66 66
67 67 @decorator
68 68 def catch_corrupt_db(f, self, *a, **kw):
69 69 """A decorator which wraps HistoryAccessor method calls to catch errors from
70 70 a corrupt SQLite database, move the old database out of the way, and create
71 71 a new one.
72 72
73 73 We avoid clobbering larger databases because this may be triggered due to filesystem issues,
74 74 not just a corrupt file.
75 75 """
76 76 try:
77 77 return f(self, *a, **kw)
78 78 except (sqlite3.DatabaseError, sqlite3.OperationalError) as e:
79 79 self._corrupt_db_counter += 1
80 80 self.log.error("Failed to open SQLite history %s (%s).", self.hist_file, e)
81 81 if self.hist_file != ':memory:':
82 82 if self._corrupt_db_counter > self._corrupt_db_limit:
83 83 self.hist_file = ':memory:'
84 84 self.log.error("Failed to load history too many times, history will not be saved.")
85 85 elif self.hist_file.is_file():
86 86 # move the file out of the way
87 87 base = str(self.hist_file.parent / self.hist_file.stem)
88 88 ext = self.hist_file.suffix
89 89 size = self.hist_file.stat().st_size
90 90 if size >= _SAVE_DB_SIZE:
91 91 # if there's significant content, avoid clobbering
92 92 now = datetime.datetime.now().isoformat().replace(':', '.')
93 93 newpath = base + '-corrupt-' + now + ext
94 94 # don't clobber previous corrupt backups
95 95 for i in range(100):
96 96 if not Path(newpath).exists():
97 97 break
98 98 else:
99 99 newpath = base + '-corrupt-' + now + (u'-%i' % i) + ext
100 100 else:
101 101 # not much content, possibly empty; don't worry about clobbering
102 102 # maybe we should just delete it?
103 103 newpath = base + '-corrupt' + ext
104 104 self.hist_file.rename(newpath)
105 105 self.log.error("History file was moved to %s and a new file created.", newpath)
106 106 self.init_db()
107 107 return []
108 108 else:
109 109 # Failed with :memory:, something serious is wrong
110 110 raise
111 111
112 112
113 113 class HistoryAccessorBase(LoggingConfigurable):
114 114 """An abstract class for History Accessors """
115 115
116 116 def get_tail(self, n=10, raw=True, output=False, include_latest=False):
117 117 raise NotImplementedError
118 118
119 119 def search(self, pattern="*", raw=True, search_raw=True,
120 120 output=False, n=None, unique=False):
121 121 raise NotImplementedError
122 122
123 123 def get_range(self, session, start=1, stop=None, raw=True,output=False):
124 124 raise NotImplementedError
125 125
126 126 def get_range_by_str(self, rangestr, raw=True, output=False):
127 127 raise NotImplementedError
128 128
129 129
130 130 class HistoryAccessor(HistoryAccessorBase):
131 131 """Access the history database without adding to it.
132 132
133 133 This is intended for use by standalone history tools. IPython shells use
134 134 HistoryManager, below, which is a subclass of this."""
135 135
136 136 # counter for init_db retries, so we don't keep trying over and over
137 137 _corrupt_db_counter = 0
138 138 # after two failures, fallback on :memory:
139 139 _corrupt_db_limit = 2
140 140
141 141 # String holding the path to the history file
142 142 hist_file = Union(
143 143 [Instance(Path), Unicode()],
144 144 help="""Path to file to use for SQLite history database.
145 145
146 146 By default, IPython will put the history database in the IPython
147 147 profile directory. If you would rather share one history among
148 148 profiles, you can set this value in each, so that they are consistent.
149 149
150 150 Due to an issue with fcntl, SQLite is known to misbehave on some NFS
151 151 mounts. If you see IPython hanging, try setting this to something on a
152 152 local disk, e.g::
153 153
154 154 ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
155 155
156 156         You can also use the specific value `:memory:` (including the colon
157 157         at both ends but not the backticks), to avoid creating a history file.
158 158
159 159 """,
160 160 ).tag(config=True)
161 161
162 162 enabled = Bool(True,
163 163 help="""enable the SQLite history
164 164
165 165 set enabled=False to disable the SQLite history,
166 166 in which case there will be no stored history, no SQLite connection,
167 167 and no background saving thread. This may be necessary in some
168 168 threaded environments where IPython is embedded.
169 169 """
170 170 ).tag(config=True)
171 171
172 172 connection_options = Dict(
173 173 help="""Options for configuring the SQLite connection
174 174
175 175 These options are passed as keyword args to sqlite3.connect
176 176 when establishing database connections.
177 177 """
178 178 ).tag(config=True)
179 179
180 180 # The SQLite database
181 181 db = Any()
182 182 @observe('db')
183 183 def _db_changed(self, change):
184 184 """validate the db, since it can be an Instance of two different types"""
185 185 new = change['new']
186 186 connection_types = (DummyDB, sqlite3.Connection)
187 187 if not isinstance(new, connection_types):
188 188 msg = "%s.db must be sqlite3 Connection or DummyDB, not %r" % \
189 189 (self.__class__.__name__, new)
190 190 raise TraitError(msg)
191 191
192 192 def __init__(self, profile="default", hist_file="", **traits):
193 193 """Create a new history accessor.
194 194
195 195 Parameters
196 196 ----------
197 197 profile : str
198 198 The name of the profile from which to open history.
199 199 hist_file : str
200 200 Path to an SQLite history database stored by IPython. If specified,
201 201 hist_file overrides profile.
202 202 config : :class:`~traitlets.config.loader.Config`
203 203 Config object. hist_file can also be set through this.
204 204 """
205 205 # We need a pointer back to the shell for various tasks.
206 206 super(HistoryAccessor, self).__init__(**traits)
207 207 # defer setting hist_file from kwarg until after init,
208 208 # otherwise the default kwarg value would clobber any value
209 209 # set by config
210 210 if hist_file:
211 211 self.hist_file = hist_file
212 212
213 213 try:
214 214 self.hist_file
215 215 except TraitError:
216 216 # No one has set the hist_file, yet.
217 217 self.hist_file = self._get_hist_file_name(profile)
218 218
219 219 self.init_db()
220 220
221 221 def _get_hist_file_name(self, profile='default'):
222 222 """Find the history file for the given profile name.
223 223
224 224 This is overridden by the HistoryManager subclass, to use the shell's
225 225 active profile.
226 226
227 227 Parameters
228 228 ----------
229 229 profile : str
230 230 The name of a profile which has a history file.
231 231 """
232 232 return Path(locate_profile(profile)) / "history.sqlite"
233 233
234 234 @catch_corrupt_db
235 235 def init_db(self):
236 236 """Connect to the database, and create tables if necessary."""
237 237 if not self.enabled:
238 238 self.db = DummyDB()
239 239 return
240 240
241 241 # use detect_types so that timestamps return datetime objects
242 242 kwargs = dict(detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
243 243 kwargs.update(self.connection_options)
244 244 self.db = sqlite3.connect(str(self.hist_file), **kwargs)
245 245 self.db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
246 246 primary key autoincrement, start timestamp,
247 247 end timestamp, num_cmds integer, remark text)""")
248 248 self.db.execute("""CREATE TABLE IF NOT EXISTS history
249 249 (session integer, line integer, source text, source_raw text,
250 250 PRIMARY KEY (session, line))""")
251 251 # Output history is optional, but ensure the table's there so it can be
252 252 # enabled later.
253 253 self.db.execute("""CREATE TABLE IF NOT EXISTS output_history
254 254 (session integer, line integer, output text,
255 255 PRIMARY KEY (session, line))""")
256 256 self.db.commit()
257 257 # success! reset corrupt db count
258 258 self._corrupt_db_counter = 0
259 259
260 260 def writeout_cache(self):
261 261 """Overridden by HistoryManager to dump the cache before certain
262 262 database lookups."""
263 263 pass
264 264
265 265 ## -------------------------------
266 266 ## Methods for retrieving history:
267 267 ## -------------------------------
268 268 def _run_sql(self, sql, params, raw=True, output=False):
269 269 """Prepares and runs an SQL query for the history database.
270 270
271 271 Parameters
272 272 ----------
273 273 sql : str
274 274 Any filtering expressions to go after SELECT ... FROM ...
275 275 params : tuple
276 276 Parameters passed to the SQL query (to replace "?")
277 277 raw, output : bool
278 278 See :meth:`get_range`
279 279
280 280 Returns
281 281 -------
282 282 Tuples as :meth:`get_range`
283 283 """
284 284 toget = 'source_raw' if raw else 'source'
285 285 sqlfrom = "history"
286 286 if output:
287 287 sqlfrom = "history LEFT JOIN output_history USING (session, line)"
288 288 toget = "history.%s, output_history.output" % toget
289 289 cur = self.db.execute("SELECT session, line, %s FROM %s " %\
290 290 (toget, sqlfrom) + sql, params)
291 291 if output: # Regroup into 3-tuples, and parse JSON
292 292 return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur)
293 293 return cur
294 294
295 295 @only_when_enabled
296 296 @catch_corrupt_db
297 297 def get_session_info(self, session):
298 298 """Get info about a session.
299 299
300 300 Parameters
301 301 ----------
302 302
303 303 session : int
304 304 Session number to retrieve.
305 305
306 306 Returns
307 307 -------
308 308
309 309 session_id : int
310 310 Session ID number
311 311 start : datetime
312 312 Timestamp for the start of the session.
313 313 end : datetime
314 314 Timestamp for the end of the session, or None if IPython crashed.
315 315 num_cmds : int
316 316 Number of commands run, or None if IPython crashed.
317 317 remark : unicode
318 318 A manually set description.
319 319 """
320 320 query = "SELECT * from sessions where session == ?"
321 321 return self.db.execute(query, (session,)).fetchone()
322 322
323 323 @catch_corrupt_db
324 324 def get_last_session_id(self):
325 325 """Get the last session ID currently in the database.
326 326
327 327 Within IPython, this should be the same as the value stored in
328 328 :attr:`HistoryManager.session_number`.
329 329 """
330 330 for record in self.get_tail(n=1, include_latest=True):
331 331 return record[0]
332 332
333 333 @catch_corrupt_db
334 334 def get_tail(self, n=10, raw=True, output=False, include_latest=False):
335 335 """Get the last n lines from the history database.
336 336
337 337 Parameters
338 338 ----------
339 339 n : int
340 340 The number of lines to get
341 341 raw, output : bool
342 342 See :meth:`get_range`
343 343 include_latest : bool
344 344 If False (default), n+1 lines are fetched, and the latest one
345 345 is discarded. This is intended to be used where the function
346 346             is called by a user command, so that the command itself is not returned.
347 347
348 348 Returns
349 349 -------
350 350 Tuples as :meth:`get_range`
351 351 """
352 352 self.writeout_cache()
353 353 if not include_latest:
354 354 n += 1
355 355 cur = self._run_sql("ORDER BY session DESC, line DESC LIMIT ?",
356 356 (n,), raw=raw, output=output)
357 357 if not include_latest:
358 358 return reversed(list(cur)[1:])
359 359 return reversed(list(cur))
360 360
361 361 @catch_corrupt_db
362 362 def search(self, pattern="*", raw=True, search_raw=True,
363 363 output=False, n=None, unique=False):
364 364 """Search the database using unix glob-style matching (wildcards
365 365 * and ?).
366 366
367 367 Parameters
368 368 ----------
369 369 pattern : str
370 370 The wildcarded pattern to match when searching
371 371 search_raw : bool
372 372 If True, search the raw input, otherwise, the parsed input
373 373 raw, output : bool
374 374 See :meth:`get_range`
375 375 n : None or int
376 376 If an integer is given, it defines the limit of
377 377 returned entries.
378 378 unique : bool
379 379             If True, return only unique entries.
380 380
381 381 Returns
382 382 -------
383 383 Tuples as :meth:`get_range`
384 384 """
385 385 tosearch = "source_raw" if search_raw else "source"
386 386 if output:
387 387 tosearch = "history." + tosearch
388 388 self.writeout_cache()
389 389 sqlform = "WHERE %s GLOB ?" % tosearch
390 390 params = (pattern,)
391 391 if unique:
392 392 sqlform += ' GROUP BY {0}'.format(tosearch)
393 393 if n is not None:
394 394 sqlform += " ORDER BY session DESC, line DESC LIMIT ?"
395 395 params += (n,)
396 396 elif unique:
397 397 sqlform += " ORDER BY session, line"
398 398 cur = self._run_sql(sqlform, params, raw=raw, output=output)
399 399 if n is not None:
400 400 return reversed(list(cur))
401 401 return cur
402 402
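A hedged usage sketch of the glob search above, assuming a standalone ``HistoryAccessor`` pointed at an existing history database::

    hist = HistoryAccessor()
    # the three most recent unique matches, printed oldest first
    for session, line, source in hist.search("*matplotlib*", n=3, unique=True):
        print("%s/%s: %s" % (session, line, source))
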
403 403 @catch_corrupt_db
404 404 def get_range(self, session, start=1, stop=None, raw=True,output=False):
405 405 """Retrieve input by session.
406 406
407 407 Parameters
408 408 ----------
409 409 session : int
410 410 Session number to retrieve.
411 411 start : int
412 412 First line to retrieve.
413 413 stop : int
414 414 End of line range (excluded from output itself). If None, retrieve
415 415 to the end of the session.
416 416 raw : bool
417 417 If True, return untranslated input
418 418 output : bool
419 419 If True, attempt to include output. This will be 'real' Python
420 420 objects for the current session, or text reprs from previous
421 421 sessions if db_log_output was enabled at the time. Where no output
422 422 is found, None is used.
423 423
424 424 Returns
425 425 -------
426 426 entries
427 427 An iterator over the desired lines. Each line is a 3-tuple, either
428 428 (session, line, input) if output is False, or
429 429 (session, line, (input, output)) if output is True.
430 430 """
431 431 if stop:
432 432 lineclause = "line >= ? AND line < ?"
433 433 params = (session, start, stop)
434 434 else:
435 435 lineclause = "line>=?"
436 436 params = (session, start)
437 437
438 438 return self._run_sql("WHERE session==? AND %s" % lineclause,
439 439 params, raw=raw, output=output)
440 440
441 441 def get_range_by_str(self, rangestr, raw=True, output=False):
442 442 """Get lines of history from a string of ranges, as used by magic
443 443 commands %hist, %save, %macro, etc.
444 444
445 445 Parameters
446 446 ----------
447 447 rangestr : str
448 A string specifying ranges, e.g. "5 ~2/1-4". See
449 :func:`magic_history` for full details.
448             A string specifying ranges, e.g. "5 ~2/1-4". If an empty string is used,
449             this will return everything from the current session's history.
450
451 See the documentation of :func:`%history` for the full details.
452
450 453 raw, output : bool
451 454 As :meth:`get_range`
452 455
453 456 Returns
454 457 -------
455 458 Tuples as :meth:`get_range`
456 459 """
457 460 for sess, s, e in extract_hist_ranges(rangestr):
458 461 for line in self.get_range(sess, s, e, raw=raw, output=output):
459 462 yield line
460 463
461 464
462 465 class HistoryManager(HistoryAccessor):
463 466 """A class to organize all history-related functionality in one place.
464 467 """
465 468 # Public interface
466 469
467 470 # An instance of the IPython shell we are attached to
468 471 shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
469 472 allow_none=True)
470 473 # Lists to hold processed and raw history. These start with a blank entry
471 474 # so that we can index them starting from 1
472 475 input_hist_parsed = List([""])
473 476 input_hist_raw = List([""])
474 477 # A list of directories visited during session
475 478 dir_hist = List()
476 479 @default('dir_hist')
477 480 def _dir_hist_default(self):
478 481 try:
479 482 return [Path.cwd()]
480 483 except OSError:
481 484 return []
482 485
483 486 # A dict of output history, keyed with ints from the shell's
484 487 # execution count.
485 488 output_hist = Dict()
486 489 # The text/plain repr of outputs.
487 490 output_hist_reprs = Dict()
488 491
489 492 # The number of the current session in the history database
490 493 session_number = Integer()
491 494
492 495 db_log_output = Bool(False,
493 496 help="Should the history database include output? (default: no)"
494 497 ).tag(config=True)
495 498 db_cache_size = Integer(0,
496 499 help="Write to database every x commands (higher values save disk access & power).\n"
497 500 "Values of 1 or less effectively disable caching."
498 501 ).tag(config=True)
499 502 # The input and output caches
500 503 db_input_cache = List()
501 504 db_output_cache = List()
502 505
503 506 # History saving in separate thread
504 507 save_thread = Instance('IPython.core.history.HistorySavingThread',
505 508 allow_none=True)
506 509 save_flag = Instance(threading.Event, allow_none=True)
507 510
508 511 # Private interface
509 512 # Variables used to store the three last inputs from the user. On each new
510 513     # Variables used to store the last three inputs from the user. On each new
511 514 # necessary.
512 515 _i00 = Unicode(u'')
513 516 _i = Unicode(u'')
514 517 _ii = Unicode(u'')
515 518 _iii = Unicode(u'')
516 519
517 520 # A regex matching all forms of the exit command, so that we don't store
518 521 # them in the history (it's annoying to rewind the first entry and land on
519 522 # an exit call).
520 523 _exit_re = re.compile(r"(exit|quit)(\s*\(.*\))?$")
521 524
522 525 def __init__(self, shell=None, config=None, **traits):
523 526 """Create a new history manager associated with a shell instance.
524 527 """
525 528 # We need a pointer back to the shell for various tasks.
526 529 super(HistoryManager, self).__init__(shell=shell, config=config,
527 530 **traits)
528 531 self.save_flag = threading.Event()
529 532 self.db_input_cache_lock = threading.Lock()
530 533 self.db_output_cache_lock = threading.Lock()
531 534
532 535 try:
533 536 self.new_session()
534 537 except sqlite3.OperationalError:
535 538 self.log.error("Failed to create history session in %s. History will not be saved.",
536 539 self.hist_file, exc_info=True)
537 540 self.hist_file = ':memory:'
538 541
539 542 if self.enabled and self.hist_file != ':memory:':
540 543 self.save_thread = HistorySavingThread(self)
541 544 self.save_thread.start()
542 545
543 546 def _get_hist_file_name(self, profile=None):
544 547 """Get default history file name based on the Shell's profile.
545 548
546 549 The profile parameter is ignored, but must exist for compatibility with
547 550 the parent class."""
548 551 profile_dir = self.shell.profile_dir.location
549 552 return Path(profile_dir) / "history.sqlite"
550 553
551 554 @only_when_enabled
552 555 def new_session(self, conn=None):
553 556 """Get a new session number."""
554 557 if conn is None:
555 558 conn = self.db
556 559
557 560 with conn:
558 561 cur = conn.execute("""INSERT INTO sessions VALUES (NULL, ?, NULL,
559 562 NULL, "") """, (datetime.datetime.now(),))
560 563 self.session_number = cur.lastrowid
561 564
562 565 def end_session(self):
563 566 """Close the database session, filling in the end time and line count."""
564 567 self.writeout_cache()
565 568 with self.db:
566 569 self.db.execute("""UPDATE sessions SET end=?, num_cmds=? WHERE
567 570 session==?""", (datetime.datetime.now(),
568 571 len(self.input_hist_parsed)-1, self.session_number))
569 572 self.session_number = 0
570 573
571 574 def name_session(self, name):
572 575 """Give the current session a name in the history database."""
573 576 with self.db:
574 577 self.db.execute("UPDATE sessions SET remark=? WHERE session==?",
575 578 (name, self.session_number))
576 579
577 580 def reset(self, new_session=True):
578 581 """Clear the session history, releasing all object references, and
579 582 optionally open a new session."""
580 583 self.output_hist.clear()
581 584 # The directory history can't be completely empty
582 585 self.dir_hist[:] = [Path.cwd()]
583 586
584 587 if new_session:
585 588 if self.session_number:
586 589 self.end_session()
587 590 self.input_hist_parsed[:] = [""]
588 591 self.input_hist_raw[:] = [""]
589 592 self.new_session()
590 593
591 594 # ------------------------------
592 595 # Methods for retrieving history
593 596 # ------------------------------
594 597 def get_session_info(self, session=0):
595 598 """Get info about a session.
596 599
597 600 Parameters
598 601 ----------
599 602
600 603 session : int
601 604 Session number to retrieve. The current session is 0, and negative
602 605 numbers count back from current session, so -1 is the previous session.
603 606
604 607 Returns
605 608 -------
606 609
607 610 session_id : int
608 611 Session ID number
609 612 start : datetime
610 613 Timestamp for the start of the session.
611 614 end : datetime
612 615 Timestamp for the end of the session, or None if IPython crashed.
613 616 num_cmds : int
614 617 Number of commands run, or None if IPython crashed.
615 618 remark : unicode
616 619 A manually set description.
617 620 """
618 621 if session <= 0:
619 622 session += self.session_number
620 623
621 624 return super(HistoryManager, self).get_session_info(session=session)
622 625
623 626 def _get_range_session(self, start=1, stop=None, raw=True, output=False):
624 627 """Get input and output history from the current session. Called by
625 628 get_range, and takes similar parameters."""
626 629 input_hist = self.input_hist_raw if raw else self.input_hist_parsed
627 630
628 631 n = len(input_hist)
629 632 if start < 0:
630 633 start += n
631 634 if not stop or (stop > n):
632 635 stop = n
633 636 elif stop < 0:
634 637 stop += n
635 638
636 639 for i in range(start, stop):
637 640 if output:
638 641 line = (input_hist[i], self.output_hist_reprs.get(i))
639 642 else:
640 643 line = input_hist[i]
641 644 yield (0, i, line)
642 645
643 646 def get_range(self, session=0, start=1, stop=None, raw=True,output=False):
644 647 """Retrieve input by session.
645 648
646 649 Parameters
647 650 ----------
648 651 session : int
649 652 Session number to retrieve. The current session is 0, and negative
650 653 numbers count back from current session, so -1 is previous session.
651 654 start : int
652 655 First line to retrieve.
653 656 stop : int
654 657 End of line range (excluded from output itself). If None, retrieve
655 658 to the end of the session.
656 659 raw : bool
657 660 If True, return untranslated input
658 661 output : bool
659 662 If True, attempt to include output. This will be 'real' Python
660 663 objects for the current session, or text reprs from previous
661 664 sessions if db_log_output was enabled at the time. Where no output
662 665 is found, None is used.
663 666
664 667 Returns
665 668 -------
666 669 entries
667 670 An iterator over the desired lines. Each line is a 3-tuple, either
668 671 (session, line, input) if output is False, or
669 672 (session, line, (input, output)) if output is True.
670 673 """
671 674 if session <= 0:
672 675 session += self.session_number
673 676 if session==self.session_number: # Current session
674 677 return self._get_range_session(start, stop, raw, output)
675 678 return super(HistoryManager, self).get_range(session, start, stop, raw,
676 679 output)
677 680
678 681 ## ----------------------------
679 682 ## Methods for storing history:
680 683 ## ----------------------------
681 684 def store_inputs(self, line_num, source, source_raw=None):
682 685 """Store source and raw input in history and create input cache
683 686 variables ``_i*``.
684 687
685 688 Parameters
686 689 ----------
687 690 line_num : int
688 691 The prompt number of this input.
689 692
690 693 source : str
691 694 Python input.
692 695
693 696 source_raw : str, optional
694 697 If given, this is the raw input without any IPython transformations
695 698 applied to it. If not given, ``source`` is used.
696 699 """
697 700 if source_raw is None:
698 701 source_raw = source
699 702 source = source.rstrip('\n')
700 703 source_raw = source_raw.rstrip('\n')
701 704
702 705 # do not store exit/quit commands
703 706 if self._exit_re.match(source_raw.strip()):
704 707 return
705 708
706 709 self.input_hist_parsed.append(source)
707 710 self.input_hist_raw.append(source_raw)
708 711
709 712 with self.db_input_cache_lock:
710 713 self.db_input_cache.append((line_num, source, source_raw))
711 714 # Trigger to flush cache and write to DB.
712 715 if len(self.db_input_cache) >= self.db_cache_size:
713 716 self.save_flag.set()
714 717
715 718 # update the auto _i variables
716 719 self._iii = self._ii
717 720 self._ii = self._i
718 721 self._i = self._i00
719 722 self._i00 = source_raw
720 723
721 724 # hackish access to user namespace to create _i1,_i2... dynamically
722 725 new_i = '_i%s' % line_num
723 726 to_main = {'_i': self._i,
724 727 '_ii': self._ii,
725 728 '_iii': self._iii,
726 729 new_i : self._i00 }
727 730
728 731 if self.shell is not None:
729 732 self.shell.push(to_main, interactive=False)
730 733
731 734 def store_output(self, line_num):
732 735 """If database output logging is enabled, this saves all the
733 736 outputs from the indicated prompt number to the database. It's
734 737 called by run_cell after code has been executed.
735 738
736 739 Parameters
737 740 ----------
738 741 line_num : int
739 742 The line number from which to save outputs
740 743 """
741 744 if (not self.db_log_output) or (line_num not in self.output_hist_reprs):
742 745 return
743 746 output = self.output_hist_reprs[line_num]
744 747
745 748 with self.db_output_cache_lock:
746 749 self.db_output_cache.append((line_num, output))
747 750 if self.db_cache_size <= 1:
748 751 self.save_flag.set()
749 752
750 753 def _writeout_input_cache(self, conn):
751 754 with conn:
752 755 for line in self.db_input_cache:
753 756 conn.execute("INSERT INTO history VALUES (?, ?, ?, ?)",
754 757 (self.session_number,)+line)
755 758
756 759 def _writeout_output_cache(self, conn):
757 760 with conn:
758 761 for line in self.db_output_cache:
759 762 conn.execute("INSERT INTO output_history VALUES (?, ?, ?)",
760 763 (self.session_number,)+line)
761 764
762 765 @only_when_enabled
763 766 def writeout_cache(self, conn=None):
764 767 """Write any entries in the cache to the database."""
765 768 if conn is None:
766 769 conn = self.db
767 770
768 771 with self.db_input_cache_lock:
769 772 try:
770 773 self._writeout_input_cache(conn)
771 774 except sqlite3.IntegrityError:
772 775 self.new_session(conn)
773 776 print("ERROR! Session/line number was not unique in",
774 777 "database. History logging moved to new session",
775 778 self.session_number)
776 779 try:
777 780 # Try writing to the new session. If this fails, don't
778 781 # recurse
779 782 self._writeout_input_cache(conn)
780 783 except sqlite3.IntegrityError:
781 784 pass
782 785 finally:
783 786 self.db_input_cache = []
784 787
785 788 with self.db_output_cache_lock:
786 789 try:
787 790 self._writeout_output_cache(conn)
788 791 except sqlite3.IntegrityError:
789 792 print("!! Session/line number for output was not unique",
790 793 "in database. Output will not be stored.")
791 794 finally:
792 795 self.db_output_cache = []
793 796
794 797
795 798 class HistorySavingThread(threading.Thread):
796 799 """This thread takes care of writing history to the database, so that
797 800 the UI isn't held up while that happens.
798 801
799 802 It waits for the HistoryManager's save_flag to be set, then writes out
800 803 the history cache. The main thread is responsible for setting the flag when
801 804 the cache size reaches a defined threshold."""
802 805 daemon = True
803 806 stop_now = False
804 807 enabled = True
805 808 def __init__(self, history_manager):
806 809 super(HistorySavingThread, self).__init__(name="IPythonHistorySavingThread")
807 810 self.history_manager = history_manager
808 811 self.enabled = history_manager.enabled
809 812 atexit.register(self.stop)
810 813
811 814 @only_when_enabled
812 815 def run(self):
813 816 # We need a separate db connection per thread:
814 817 try:
815 818 self.db = sqlite3.connect(
816 819 str(self.history_manager.hist_file),
817 820 **self.history_manager.connection_options
818 821 )
819 822 while True:
820 823 self.history_manager.save_flag.wait()
821 824 if self.stop_now:
822 825 self.db.close()
823 826 return
824 827 self.history_manager.save_flag.clear()
825 828 self.history_manager.writeout_cache(self.db)
826 829 except Exception as e:
827 830             print(("The history saving thread hit an unexpected error (%s). "
828 831 "History will not be written to the database.") % repr(e))
829 832
830 833 def stop(self):
831 834 """This can be called from the main thread to safely stop this thread.
832 835
833 836 Note that it does not attempt to write out remaining history before
834 837 exiting. That should be done by calling the HistoryManager's
835 838 end_session method."""
836 839 self.stop_now = True
837 840 self.history_manager.save_flag.set()
838 841 self.join()
839 842
840 843
841 844 # To match, e.g. ~5/8-~2/3
842 845 range_re = re.compile(r"""
843 846 ((?P<startsess>~?\d+)/)?
844 847 (?P<start>\d+)?
845 848 ((?P<sep>[\-:])
846 849 ((?P<endsess>~?\d+)/)?
847 850 (?P<end>\d+))?
848 851 $""", re.VERBOSE)
849 852
850 853
851 854 def extract_hist_ranges(ranges_str):
852 855 """Turn a string of history ranges into 3-tuples of (session, start, stop).
853 856
857     An empty string results in `[(0, 1, None)]`, i.e. "everything from the current
858 session".
859
854 860 Examples
855 861 --------
856 862 >>> list(extract_hist_ranges("~8/5-~7/4 2"))
857 863 [(-8, 5, None), (-7, 1, 5), (0, 2, 3)]
858 864 """
865 if ranges_str == "":
866 yield (0, 1, None) # Everything from current session
867 return
868
859 869 for range_str in ranges_str.split():
860 870 rmatch = range_re.match(range_str)
861 871 if not rmatch:
862 872 continue
863 873 start = rmatch.group("start")
864 874 if start:
865 875 start = int(start)
866 876 end = rmatch.group("end")
867 877 # If no end specified, get (a, a + 1)
868 878 end = int(end) if end else start + 1
869 879 else: # start not specified
870 880 if not rmatch.group('startsess'): # no startsess
871 881 continue
872 882 start = 1
873 883 end = None # provide the entire session hist
874 884
875 885 if rmatch.group("sep") == "-": # 1-3 == 1:4 --> [1, 2, 3]
876 886 end += 1
877 887 startsess = rmatch.group("startsess") or "0"
878 888 endsess = rmatch.group("endsess") or startsess
879 889 startsess = int(startsess.replace("~","-"))
880 890 endsess = int(endsess.replace("~","-"))
881 891         assert endsess >= startsess, "start session must not be later than end session"
882 892
883 893 if endsess == startsess:
884 894 yield (startsess, start, end)
885 895 continue
886 896 # Multiple sessions in one range:
887 897 yield (startsess, start, None)
888 898 for sess in range(startsess+1, endsess):
889 899 yield (sess, 1, None)
890 900 yield (endsess, 1, end)
891 901
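A couple of additional worked cases of the range grammar handled above, for illustration::

    assert list(extract_hist_ranges("")) == [(0, 1, None)]   # current session, all lines
    assert list(extract_hist_ranges("4-6")) == [(0, 4, 7)]   # inclusive '-' bumps the stop line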
892 902
893 903 def _format_lineno(session, line):
894 904 """Helper function to format line numbers properly."""
895 905 if session == 0:
896 906 return str(line)
897 907 return "%s#%s" % (session, line)
@@ -1,729 +1,799 b''
1 1 """Input transformer machinery to support IPython special syntax.
2 2
3 3 This includes the machinery to recognise and transform ``%magic`` commands,
4 4 ``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
5 5
6 6 Added: IPython 7.0. Replaces inputsplitter and inputtransformer which were
7 7 deprecated in 7.0.
8 8 """
9 9
10 10 # Copyright (c) IPython Development Team.
11 11 # Distributed under the terms of the Modified BSD License.
12 12
13 from codeop import compile_command
13 import ast
14 import sys
15 from codeop import CommandCompiler, Compile
14 16 import re
15 17 import tokenize
16 18 from typing import List, Tuple, Optional, Any
17 19 import warnings
18 20
19 21 _indent_re = re.compile(r'^[ \t]+')
20 22
21 23 def leading_empty_lines(lines):
22 24 """Remove leading empty lines
23 25
24 26 If the leading lines are empty or contain only whitespace, they will be
25 27 removed.
26 28 """
27 29 if not lines:
28 30 return lines
29 31 for i, line in enumerate(lines):
30 32 if line and not line.isspace():
31 33 return lines[i:]
32 34 return lines
33 35
34 36 def leading_indent(lines):
35 37 """Remove leading indentation.
36 38
37 39 If the first line starts with spaces or tabs, the same whitespace will be
38 40 removed from each following line in the cell.
39 41 """
40 42 if not lines:
41 43 return lines
42 44 m = _indent_re.match(lines[0])
43 45 if not m:
44 46 return lines
45 47 space = m.group(0)
46 48 n = len(space)
47 49 return [l[n:] if l.startswith(space) else l
48 50 for l in lines]
49 51
50 52 class PromptStripper:
51 53 """Remove matching input prompts from a block of input.
52 54
53 55 Parameters
54 56 ----------
55 57 prompt_re : regular expression
56 58 A regular expression matching any input prompt (including continuation,
57 59 e.g. ``...``)
58 60 initial_re : regular expression, optional
59 61 A regular expression matching only the initial prompt, but not continuation.
60 62 If no initial expression is given, prompt_re will be used everywhere.
61 63 Used mainly for plain Python prompts (``>>>``), where the continuation prompt
62 64 ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
63 65
64 66 Notes
65 67 -----
66 68
67 69 If initial_re and prompt_re differ,
68 70 only initial_re will be tested against the first line.
69 71 If any prompt is found on the first two lines,
70 72 prompts will be stripped from the rest of the block.
71 73 """
72 74 def __init__(self, prompt_re, initial_re=None):
73 75 self.prompt_re = prompt_re
74 76 self.initial_re = initial_re or prompt_re
75 77
76 78 def _strip(self, lines):
77 79 return [self.prompt_re.sub('', l, count=1) for l in lines]
78 80
79 81 def __call__(self, lines):
80 82 if not lines:
81 83 return lines
82 84 if self.initial_re.match(lines[0]) or \
83 85 (len(lines) > 1 and self.prompt_re.match(lines[1])):
84 86 return self._strip(lines)
85 87 return lines
86 88
87 89 classic_prompt = PromptStripper(
88 90 prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
89 91 initial_re=re.compile(r'^>>>( |$)')
90 92 )
91 93
92 ipython_prompt = PromptStripper(re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)'))
94 ipython_prompt = PromptStripper(
95 re.compile(
96 r"""
97 ^( # Match from the beginning of a line, either:
98
99 # 1. First-line prompt:
100 ((\[nav\]|\[ins\])?\ )? # Vi editing mode prompt, if it's there
101 In\ # The 'In' of the prompt, with a space
102 \[\d+\]: # Command index, as displayed in the prompt
103 \ # With a mandatory trailing space
104
105 | # ... or ...
106
107 # 2. The three dots of the multiline prompt
108 \s* # All leading whitespace characters
109 \.{3,}: # The three (or more) dots
110 \ ? # With an optional trailing space
111
112 )
113 """,
114 re.VERBOSE,
115 )
116 )
117
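To make the two strippers concrete, the sketch below removes pasted doctest-style and IPython-style prompts. The import path IPython.core.inputtransformer2 is an assumption (the module's usual upstream location); the commented results are what the regexes above should yield.

from IPython.core.inputtransformer2 import classic_prompt, ipython_prompt

# Classic '>>>'/'...' prompts, e.g. pasted from a doctest.
print(classic_prompt([">>> a = 1\n", "... b = 2\n"]))
# ['a = 1\n', 'b = 2\n']

# IPython prompts, including the '...:' continuation.
print(ipython_prompt(["In [1]: x = 10\n", "   ...: y = 20\n"]))
# ['x = 10\n', 'y = 20\n']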
93 118
94 119 def cell_magic(lines):
95 120 if not lines or not lines[0].startswith('%%'):
96 121 return lines
97 122 if re.match(r'%%\w+\?', lines[0]):
98 123 # This case will be handled by help_end
99 124 return lines
100 125 magic_name, _, first_line = lines[0][2:].rstrip().partition(' ')
101 126 body = ''.join(lines[1:])
102 127 return ['get_ipython().run_cell_magic(%r, %r, %r)\n'
103 128 % (magic_name, first_line, body)]
104 129
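A short sketch of what the cell-magic transform produces, again assuming the upstream import path. The entire body of the cell is handed to run_cell_magic as one string:

from IPython.core.inputtransformer2 import cell_magic

lines = ["%%bash\n", "echo hello\n", "echo world\n"]
print(cell_magic(lines)[0])
# get_ipython().run_cell_magic('bash', '', 'echo hello\necho world\n')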
105 130
106 131 def _find_assign_op(token_line) -> Optional[int]:
107 132 """Get the index of the first assignment in the line ('=' not inside brackets)
108 133
109 134 Note: We don't try to support multiple special assignments (a = b = %foo)
110 135 """
111 136 paren_level = 0
112 137 for i, ti in enumerate(token_line):
113 138 s = ti.string
114 139 if s == '=' and paren_level == 0:
115 140 return i
116 141 if s in {'(','[','{'}:
117 142 paren_level += 1
118 143 elif s in {')', ']', '}'}:
119 144 if paren_level > 0:
120 145 paren_level -= 1
121 146 return None
122 147
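Only a top-level '=' counts: an '=' inside brackets (such as a keyword argument) is ignored. A minimal check, reusing make_tokens_by_line from later in this module (import path assumed as above):

from IPython.core.inputtransformer2 import make_tokens_by_line, _find_assign_op

line = make_tokens_by_line(["d['k'] = %time f()\n"])[0]
ix = _find_assign_op(line)
print(line[ix].string)        # prints the top-level '='

line = make_tokens_by_line(["f(x=1)\n"])[0]
print(_find_assign_op(line))  # None: the '=' is inside brackets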
123 148 def find_end_of_continued_line(lines, start_line: int):
124 149 """Find the last line of a line explicitly extended using backslashes.
125 150
126 151 Uses 0-indexed line numbers.
127 152 """
128 153 end_line = start_line
129 154 while lines[end_line].endswith('\\\n'):
130 155 end_line += 1
131 156 if end_line >= len(lines):
132 157 break
133 158 return end_line
134 159
135 160 def assemble_continued_line(lines, start: Tuple[int, int], end_line: int):
136 161 r"""Assemble a single line from multiple continued line pieces
137 162
138 163 Continued lines are lines ending in ``\``, and the line following the last
139 164 ``\`` in the block.
140 165
141 166 For example, this code continues over multiple lines::
142 167
143 168 if (assign_ix is not None) \
144 169 and (len(line) >= assign_ix + 2) \
145 170 and (line[assign_ix+1].string == '%') \
146 171 and (line[assign_ix+2].type == tokenize.NAME):
147 172
148 173 This statement contains four continued line pieces.
149 174 Assembling these pieces into a single line would give::
150 175
151 176 if (assign_ix is not None) and (len(line) >= assign_ix + 2) and (line[...
152 177
153 178 This uses 0-indexed line numbers. *start* is (lineno, colno).
154 179
155 180 Used to allow ``%magic`` and ``!system`` commands to be continued over
156 181 multiple lines.
157 182 """
158 183 parts = [lines[start[0]][start[1]:]] + lines[start[0]+1:end_line+1]
159 184 return ' '.join([p.rstrip()[:-1] for p in parts[:-1]] # Strip backslash+newline
160 185 + [parts[-1].rstrip()]) # Strip newline from last line
161 186
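A small sketch of the two helpers together (import path assumed as before). A backslash-continued magic is folded back into a single logical line before being transformed:

from IPython.core.inputtransformer2 import (
    find_end_of_continued_line,
    assemble_continued_line,
)

lines = ["a = %ls\\\n", "-la\n"]
end = find_end_of_continued_line(lines, 0)
print(end)                                          # 1
print(assemble_continued_line(lines, (0, 4), end))  # %ls -la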
162 187 class TokenTransformBase:
163 188 """Base class for transformations which examine tokens.
164 189
165 190 Special syntax should not be transformed when it occurs inside strings or
166 191 comments. This is hard to reliably avoid with regexes. The solution is to
167 192 tokenise the code as Python, and recognise the special syntax in the tokens.
168 193
169 194 IPython's special syntax is not valid Python syntax, so tokenising may go
170 195 wrong after the special syntax starts. These classes therefore find and
171 196 transform *one* instance of special syntax at a time into regular Python
172 197 syntax. After each transformation, tokens are regenerated to find the next
173 198 piece of special syntax.
174 199
175 200 Subclasses need to implement one class method (find)
176 201 and one regular method (transform).
177 202
178 203 The priority attribute can select which transformation to apply if multiple
179 204 transformers match in the same place. Lower numbers have higher priority.
180 205 This allows "%magic?" to be turned into a help call rather than a magic call.
181 206 """
182 207 # Lower numbers -> higher priority (for matches in the same location)
183 208 priority = 10
184 209
185 210 def sortby(self):
186 211 return self.start_line, self.start_col, self.priority
187 212
188 213 def __init__(self, start):
189 214 self.start_line = start[0] - 1 # Shift from 1-index to 0-index
190 215 self.start_col = start[1]
191 216
192 217 @classmethod
193 218 def find(cls, tokens_by_line):
194 219 """Find one instance of special syntax in the provided tokens.
195 220
196 221 Tokens are grouped into logical lines for convenience,
197 222 so it is easy to e.g. look at the first token of each line.
198 223 *tokens_by_line* is a list of lists of tokenize.TokenInfo objects.
199 224
200 225 This should return an instance of its class, pointing to the start
201 226 position it has found, or None if it found no match.
202 227 """
203 228 raise NotImplementedError
204 229
205 230 def transform(self, lines: List[str]):
206 231 """Transform one instance of special syntax found by ``find()``
207 232
208 233 Takes a list of strings representing physical lines,
209 234 returns a similar list of transformed lines.
210 235 """
211 236 raise NotImplementedError
212 237
213 238 class MagicAssign(TokenTransformBase):
214 239 """Transformer for assignments from magics (a = %foo)"""
215 240 @classmethod
216 241 def find(cls, tokens_by_line):
217 242 """Find the first magic assignment (a = %foo) in the cell.
218 243 """
219 244 for line in tokens_by_line:
220 245 assign_ix = _find_assign_op(line)
221 246 if (assign_ix is not None) \
222 247 and (len(line) >= assign_ix + 2) \
223 248 and (line[assign_ix+1].string == '%') \
224 249 and (line[assign_ix+2].type == tokenize.NAME):
225 250 return cls(line[assign_ix+1].start)
226 251
227 252 def transform(self, lines: List[str]):
228 253 """Transform a magic assignment found by the ``find()`` classmethod.
229 254 """
230 255 start_line, start_col = self.start_line, self.start_col
231 256 lhs = lines[start_line][:start_col]
232 257 end_line = find_end_of_continued_line(lines, start_line)
233 258 rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
234 259 assert rhs.startswith('%'), rhs
235 260 magic_name, _, args = rhs[1:].partition(' ')
236 261
237 262 lines_before = lines[:start_line]
238 263 call = "get_ipython().run_line_magic({!r}, {!r})".format(magic_name, args)
239 264 new_line = lhs + call + '\n'
240 265 lines_after = lines[end_line+1:]
241 266
242 267 return lines_before + [new_line] + lines_after
243 268
244 269
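Driving the transformer by hand shows the rewrite it performs. The sketch assumes the upstream import path; make_tokens_by_line is defined further down in this module:

from IPython.core.inputtransformer2 import MagicAssign, make_tokens_by_line

lines = ["a = %ls -la\n"]
transformer = MagicAssign.find(make_tokens_by_line(lines))
print(transformer.transform(lines))
# ["a = get_ipython().run_line_magic('ls', '-la')\n"]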
245 270 class SystemAssign(TokenTransformBase):
246 271 """Transformer for assignments from system commands (a = !foo)"""
247 272 @classmethod
248 273 def find(cls, tokens_by_line):
249 274 """Find the first system assignment (a = !foo) in the cell.
250 275 """
251 276 for line in tokens_by_line:
252 277 assign_ix = _find_assign_op(line)
253 278 if (assign_ix is not None) \
254 279 and not line[assign_ix].line.strip().startswith('=') \
255 280 and (len(line) >= assign_ix + 2) \
256 281 and (line[assign_ix + 1].type == tokenize.ERRORTOKEN):
257 282 ix = assign_ix + 1
258 283
259 284 while ix < len(line) and line[ix].type == tokenize.ERRORTOKEN:
260 285 if line[ix].string == '!':
261 286 return cls(line[ix].start)
262 287 elif not line[ix].string.isspace():
263 288 break
264 289 ix += 1
265 290
266 291 def transform(self, lines: List[str]):
267 292 """Transform a system assignment found by the ``find()`` classmethod.
268 293 """
269 294 start_line, start_col = self.start_line, self.start_col
270 295
271 296 lhs = lines[start_line][:start_col]
272 297 end_line = find_end_of_continued_line(lines, start_line)
273 298 rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
274 299 assert rhs.startswith('!'), rhs
275 300 cmd = rhs[1:]
276 301
277 302 lines_before = lines[:start_line]
278 303 call = "get_ipython().getoutput({!r})".format(cmd)
279 304 new_line = lhs + call + '\n'
280 305 lines_after = lines[end_line + 1:]
281 306
282 307 return lines_before + [new_line] + lines_after
283 308
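The system-command counterpart behaves the same way, rewriting the right-hand side into a getoutput call. A minimal sketch under the same import-path assumption:

from IPython.core.inputtransformer2 import SystemAssign, make_tokens_by_line

lines = ["files = !ls\n"]
transformer = SystemAssign.find(make_tokens_by_line(lines))
print(transformer.transform(lines))
# ["files = get_ipython().getoutput('ls')\n"]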
284 309 # The escape sequences that define the syntax transformations IPython will
285 310 # apply to user input. These can NOT be just changed here: many regular
286 311 # expressions and other parts of the code may use their hardcoded values, and
287 312 # for all intents and purposes they constitute the 'IPython syntax', so they
288 313 # should be considered fixed.
289 314
290 315 ESC_SHELL = '!' # Send line to underlying system shell
291 316 ESC_SH_CAP = '!!' # Send line to system shell and capture output
292 317 ESC_HELP = '?' # Find information about object
293 318 ESC_HELP2 = '??' # Find extra-detailed information about object
294 319 ESC_MAGIC = '%' # Call magic function
295 320 ESC_MAGIC2 = '%%' # Call cell-magic function
296 321 ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
297 322 ESC_QUOTE2 = ';' # Quote all args as a single string, call
298 323 ESC_PAREN = '/' # Call first argument with rest of line as arguments
299 324
300 325 ESCAPE_SINGLES = {'!', '?', '%', ',', ';', '/'}
301 326 ESCAPE_DOUBLES = {'!!', '??'} # %% (cell magic) is handled separately
302 327
303 328 def _make_help_call(target, esc, next_input=None):
304 329 """Prepares a pinfo(2)/psearch call from a target name and the escape
305 330 (i.e. ? or ??)"""
306 331 method = 'pinfo2' if esc == '??' \
307 332 else 'psearch' if '*' in target \
308 333 else 'pinfo'
309 334 arg = " ".join([method, target])
310 335 # Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
311 336 t_magic_name, _, t_magic_arg_s = arg.partition(' ')
312 337 t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
313 338 if next_input is None:
314 339 return 'get_ipython().run_line_magic(%r, %r)' % (t_magic_name, t_magic_arg_s)
315 340 else:
316 341 return 'get_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)' % \
317 342 (next_input, t_magic_name, t_magic_arg_s)
318 343
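For instance, the three help flavours map onto the pinfo, pinfo2 and psearch magics (import path assumed as above; the comments show the expected strings):

from IPython.core.inputtransformer2 import _make_help_call

print(_make_help_call("os.path", "?"))   # get_ipython().run_line_magic('pinfo', 'os.path')
print(_make_help_call("os.path", "??"))  # get_ipython().run_line_magic('pinfo2', 'os.path')
print(_make_help_call("os.*", "?"))      # get_ipython().run_line_magic('psearch', 'os.*')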
319 344 def _tr_help(content):
320 345 """Translate lines escaped with: ?
321 346
322 347 A naked help line should fire the intro help screen (shell.show_usage())
323 348 """
324 349 if not content:
325 350 return 'get_ipython().show_usage()'
326 351
327 352 return _make_help_call(content, '?')
328 353
329 354 def _tr_help2(content):
330 355 """Translate lines escaped with: ??
331 356
332 357 A naked help line should fire the intro help screen (shell.show_usage())
333 358 """
334 359 if not content:
335 360 return 'get_ipython().show_usage()'
336 361
337 362 return _make_help_call(content, '??')
338 363
339 364 def _tr_magic(content):
340 365 "Translate lines escaped with a percent sign: %"
341 366 name, _, args = content.partition(' ')
342 367 return 'get_ipython().run_line_magic(%r, %r)' % (name, args)
343 368
344 369 def _tr_quote(content):
345 370 "Translate lines escaped with a comma: ,"
346 371 name, _, args = content.partition(' ')
347 372 return '%s("%s")' % (name, '", "'.join(args.split()) )
348 373
349 374 def _tr_quote2(content):
350 375 "Translate lines escaped with a semicolon: ;"
351 376 name, _, args = content.partition(' ')
352 377 return '%s("%s")' % (name, args)
353 378
354 379 def _tr_paren(content):
355 380 "Translate lines escaped with a slash: /"
356 381 name, _, args = content.partition(' ')
357 382 return '%s(%s)' % (name, ", ".join(args.split()))
358 383
359 384 tr = { ESC_SHELL : 'get_ipython().system({!r})'.format,
360 385 ESC_SH_CAP : 'get_ipython().getoutput({!r})'.format,
361 386 ESC_HELP : _tr_help,
362 387 ESC_HELP2 : _tr_help2,
363 388 ESC_MAGIC : _tr_magic,
364 389 ESC_QUOTE : _tr_quote,
365 390 ESC_QUOTE2 : _tr_quote2,
366 391 ESC_PAREN : _tr_paren }
367 392
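The table above can be exercised directly. A brief sketch of a few of the escapes, with the import path assumed as before:

from IPython.core.inputtransformer2 import tr, ESC_SHELL, ESC_MAGIC, ESC_QUOTE, ESC_PAREN

print(tr[ESC_MAGIC]("time f()"))  # get_ipython().run_line_magic('time', 'f()')
print(tr[ESC_SHELL]("ls -la"))    # get_ipython().system('ls -la')
print(tr[ESC_QUOTE]("f a b c"))   # f("a", "b", "c")
print(tr[ESC_PAREN]("f a b c"))   # f(a, b, c)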
368 393 class EscapedCommand(TokenTransformBase):
369 394 """Transformer for escaped commands like %foo, !foo, or /foo"""
370 395 @classmethod
371 396 def find(cls, tokens_by_line):
372 397 """Find the first escaped command (%foo, !foo, etc.) in the cell.
373 398 """
374 399 for line in tokens_by_line:
375 400 if not line:
376 401 continue
377 402 ix = 0
378 403 ll = len(line)
379 404 while ll > ix and line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
380 405 ix += 1
381 406 if ix >= ll:
382 407 continue
383 408 if line[ix].string in ESCAPE_SINGLES:
384 409 return cls(line[ix].start)
385 410
386 411 def transform(self, lines):
387 412 """Transform an escaped line found by the ``find()`` classmethod.
388 413 """
389 414 start_line, start_col = self.start_line, self.start_col
390 415
391 416 indent = lines[start_line][:start_col]
392 417 end_line = find_end_of_continued_line(lines, start_line)
393 418 line = assemble_continued_line(lines, (start_line, start_col), end_line)
394 419
395 420 if len(line) > 1 and line[:2] in ESCAPE_DOUBLES:
396 421 escape, content = line[:2], line[2:]
397 422 else:
398 423 escape, content = line[:1], line[1:]
399 424
400 425 if escape in tr:
401 426 call = tr[escape](content)
402 427 else:
403 428 call = ''
404 429
405 430 lines_before = lines[:start_line]
406 431 new_line = indent + call + '\n'
407 432 lines_after = lines[end_line + 1:]
408 433
409 434 return lines_before + [new_line] + lines_after
410 435
411 436 _help_end_re = re.compile(r"""(%{0,2}
412 437 (?!\d)[\w*]+ # Variable name
413 438 (\.(?!\d)[\w*]+)* # .etc.etc
414 439 )
415 440 (\?\??)$ # ? or ??
416 441 """,
417 442 re.VERBOSE)
418 443
419 444 class HelpEnd(TokenTransformBase):
420 445 """Transformer for help syntax: obj? and obj??"""
421 446 # This needs to be higher priority (lower number) than EscapedCommand so
422 447 # that inspecting magics (%foo?) works.
423 448 priority = 5
424 449
425 450 def __init__(self, start, q_locn):
426 451 super().__init__(start)
427 452 self.q_line = q_locn[0] - 1 # Shift from 1-indexed to 0-indexed
428 453 self.q_col = q_locn[1]
429 454
430 455 @classmethod
431 456 def find(cls, tokens_by_line):
432 457 """Find the first help command (foo?) in the cell.
433 458 """
434 459 for line in tokens_by_line:
435 460 # Last token is NEWLINE; look at last but one
436 461 if len(line) > 2 and line[-2].string == '?':
437 462 # Find the first token that's not INDENT/DEDENT
438 463 ix = 0
439 464 while line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
440 465 ix += 1
441 466 return cls(line[ix].start, line[-2].start)
442 467
443 468 def transform(self, lines):
444 469 """Transform a help command found by the ``find()`` classmethod.
445 470 """
446 471 piece = ''.join(lines[self.start_line:self.q_line+1])
447 472 indent, content = piece[:self.start_col], piece[self.start_col:]
448 473 lines_before = lines[:self.start_line]
449 474 lines_after = lines[self.q_line + 1:]
450 475
451 476 m = _help_end_re.search(content)
452 477 if not m:
453 478 raise SyntaxError(content)
454 479 assert m is not None, content
455 480 target = m.group(1)
456 481 esc = m.group(3)
457 482
458 483 # If we're mid-command, put it back on the next prompt for the user.
459 484 next_input = None
460 485 if (not lines_before) and (not lines_after) \
461 486 and content.strip() != m.group(0):
462 487 next_input = content.rstrip('?\n')
463 488
464 489 call = _make_help_call(target, esc, next_input=next_input)
465 490 new_line = indent + call + '\n'
466 491
467 492 return lines_before + [new_line] + lines_after
468 493
469 494 def make_tokens_by_line(lines:List[str]):
470 495 """Tokenize a series of lines and group tokens by line.
471 496
472 497 The tokens for a multiline Python string or expression are grouped as one
473 498 line. All lines except the last lines should keep their line ending ('\\n',
474 499 '\\r\\n') for this to properly work. Use `.splitlines(keeplineending=True)`
475 500 for example when passing block of text to this function.
476 501
477 502 """
478 503 # NL tokens are used inside multiline expressions, but also after blank
479 504 # lines or comments. This is intentional - see https://bugs.python.org/issue17061
480 505 # We want to group the former case together but split the latter, so we
481 506 # track parentheses level, similar to the internals of tokenize.
482 507
483 508 # reexported from token on 3.7+
484 509 NEWLINE, NL = tokenize.NEWLINE, tokenize.NL # type: ignore
485 510 tokens_by_line:List[List[Any]] = [[]]
486 511 if len(lines) > 1 and not lines[0].endswith(('\n', '\r', '\r\n', '\x0b', '\x0c')):
487 512 warnings.warn("`make_tokens_by_line` received a list of lines which do not have lineending markers ('\\n', '\\r', '\\r\\n', '\\x0b', '\\x0c'), behavior will be unspecified")
488 513 parenlev = 0
489 514 try:
490 515 for token in tokenize.generate_tokens(iter(lines).__next__):
491 516 tokens_by_line[-1].append(token)
492 517 if (token.type == NEWLINE) \
493 518 or ((token.type == NL) and (parenlev <= 0)):
494 519 tokens_by_line.append([])
495 520 elif token.string in {'(', '[', '{'}:
496 521 parenlev += 1
497 522 elif token.string in {')', ']', '}'}:
498 523 if parenlev > 0:
499 524 parenlev -= 1
500 525 except tokenize.TokenError:
501 526 # Input ended in a multiline string or expression. That's OK for us.
502 527 pass
503 528
504 529
505 530 if not tokens_by_line[-1]:
506 531 tokens_by_line.pop()
507 532
508 533
509 534 return tokens_by_line
510 535
536
537 def has_sunken_brackets(tokens: List[tokenize.TokenInfo]):
538 """Check if the depth of brackets in the list of tokens drops below 0"""
539 parenlev = 0
540 for token in tokens:
541 if token.string in {"(", "[", "{"}:
542 parenlev += 1
543 elif token.string in {")", "]", "}"}:
544 parenlev -= 1
545 if parenlev < 0:
546 return True
547 return False
548
549
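A quick sketch of the bracket check, reusing make_tokens_by_line from above (import path assumed). The first cell has one more closer than opener, so its bracket depth sinks below zero:

from IPython.core.inputtransformer2 import make_tokens_by_line, has_sunken_brackets

print(has_sunken_brackets(make_tokens_by_line(["print(x))\n"])[0]))  # True
print(has_sunken_brackets(make_tokens_by_line(["print((x)\n"])[0]))  # False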
511 550 def show_linewise_tokens(s: str):
512 551 """For investigation and debugging"""
513 552 if not s.endswith('\n'):
514 553 s += '\n'
515 554 lines = s.splitlines(keepends=True)
516 555 for line in make_tokens_by_line(lines):
517 556 print("Line -------")
518 557 for tokinfo in line:
519 558 print(" ", tokinfo)
520 559
521 560 # Arbitrary limit to prevent getting stuck in infinite loops
522 561 TRANSFORM_LOOP_LIMIT = 500
523 562
524 563 class TransformerManager:
525 564 """Applies various transformations to a cell or code block.
526 565
527 566 The key methods for external use are ``transform_cell()``
528 567 and ``check_complete()``.
529 568 """
530 569 def __init__(self):
531 570 self.cleanup_transforms = [
532 571 leading_empty_lines,
533 572 leading_indent,
534 573 classic_prompt,
535 574 ipython_prompt,
536 575 ]
537 576 self.line_transforms = [
538 577 cell_magic,
539 578 ]
540 579 self.token_transformers = [
541 580 MagicAssign,
542 581 SystemAssign,
543 582 EscapedCommand,
544 583 HelpEnd,
545 584 ]
546 585
547 586 def do_one_token_transform(self, lines):
548 587 """Find and run the transform earliest in the code.
549 588
550 589 Returns (changed, lines).
551 590
552 591 This method is called repeatedly until changed is False, indicating
553 592 that all available transformations are complete.
554 593
555 594 The tokens following IPython special syntax might not be valid, so
556 595 the transformed code is retokenised every time to identify the next
557 596 piece of special syntax. Hopefully long code cells are mostly valid
558 597 Python, not using lots of IPython special syntax, so this shouldn't be
559 598 a performance issue.
560 599 """
561 600 tokens_by_line = make_tokens_by_line(lines)
562 601 candidates = []
563 602 for transformer_cls in self.token_transformers:
564 603 transformer = transformer_cls.find(tokens_by_line)
565 604 if transformer:
566 605 candidates.append(transformer)
567 606
568 607 if not candidates:
569 608 # Nothing to transform
570 609 return False, lines
571 610 ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby)
572 611 for transformer in ordered_transformers:
573 612 try:
574 613 return True, transformer.transform(lines)
575 614 except SyntaxError:
576 615 pass
577 616 return False, lines
578 617
579 618 def do_token_transforms(self, lines):
580 619 for _ in range(TRANSFORM_LOOP_LIMIT):
581 620 changed, lines = self.do_one_token_transform(lines)
582 621 if not changed:
583 622 return lines
584 623
585 624 raise RuntimeError("Input transformation still changing after "
586 625 "%d iterations. Aborting." % TRANSFORM_LOOP_LIMIT)
587 626
588 627 def transform_cell(self, cell: str) -> str:
589 628 """Transforms a cell of input code"""
590 629 if not cell.endswith('\n'):
591 630 cell += '\n' # Ensure the cell has a trailing newline
592 631 lines = cell.splitlines(keepends=True)
593 632 for transform in self.cleanup_transforms + self.line_transforms:
594 633 lines = transform(lines)
595 634
596 635 lines = self.do_token_transforms(lines)
597 636 return ''.join(lines)
598 637
599 638 def check_complete(self, cell: str):
600 639 """Return whether a block of code is ready to execute, or should be continued
601 640
602 641 Parameters
603 642 ----------
604 643 cell : str
605 644 Python input code, which can be multiline.
606 645
607 646 Returns
608 647 -------
609 648 status : str
610 649 One of 'complete', 'incomplete', or 'invalid' if source is not a
611 650 prefix of valid code.
612 651 indent_spaces : int or None
613 652 The number of spaces by which to indent the next line of code. If
614 653 status is not 'incomplete', this is None.
615 654 """
616 655 # Remember whether the cell ends in a newline.
617 656 ends_with_newline = False
618 657 for character in reversed(cell):
619 658 if character == '\n':
620 659 ends_with_newline = True
621 660 break
622 661 elif character.strip():
623 662 break
624 663 else:
625 664 continue
626 665
627 666 if not ends_with_newline:
628 667 # Append a newline for consistent tokenization
629 668 # See https://bugs.python.org/issue33899
630 669 cell += '\n'
631 670
632 671 lines = cell.splitlines(keepends=True)
633 672
634 673 if not lines:
635 674 return 'complete', None
636 675
637 676 if lines[-1].endswith('\\'):
638 677 # Explicit backslash continuation
639 678 return 'incomplete', find_last_indent(lines)
640 679
641 680 try:
642 681 for transform in self.cleanup_transforms:
643 682 if not getattr(transform, 'has_side_effects', False):
644 683 lines = transform(lines)
645 684 except SyntaxError:
646 685 return 'invalid', None
647 686
648 687 if lines[0].startswith('%%'):
649 688 # Special case for cell magics - completion marked by blank line
650 689 if lines[-1].strip():
651 690 return 'incomplete', find_last_indent(lines)
652 691 else:
653 692 return 'complete', None
654 693
655 694 try:
656 695 for transform in self.line_transforms:
657 696 if not getattr(transform, 'has_side_effects', False):
658 697 lines = transform(lines)
659 698 lines = self.do_token_transforms(lines)
660 699 except SyntaxError:
661 700 return 'invalid', None
662 701
663 702 tokens_by_line = make_tokens_by_line(lines)
664 703
704 # Bail if we got one line and its bracket depth drops below zero
705 # (more closing brackets than opening ones)
706 if (
707 len(lines) == 1
708 and tokens_by_line
709 and has_sunken_brackets(tokens_by_line[0])
710 ):
711 return "invalid", None
712
665 713 if not tokens_by_line:
666 714 return 'incomplete', find_last_indent(lines)
667 715
668 716 if tokens_by_line[-1][-1].type != tokenize.ENDMARKER:
669 717 # We're in a multiline string or expression
670 718 return 'incomplete', find_last_indent(lines)
671 719
672 720 newline_types = {tokenize.NEWLINE, tokenize.COMMENT, tokenize.ENDMARKER} # type: ignore
673 721
674 722 # Pop the last line which only contains DEDENTs and ENDMARKER
675 723 last_token_line = None
676 724 if {t.type for t in tokens_by_line[-1]} in [
677 725 {tokenize.DEDENT, tokenize.ENDMARKER},
678 726 {tokenize.ENDMARKER}
679 727 ] and len(tokens_by_line) > 1:
680 728 last_token_line = tokens_by_line.pop()
681 729
682 730 while tokens_by_line[-1] and tokens_by_line[-1][-1].type in newline_types:
683 731 tokens_by_line[-1].pop()
684 732
685 733 if not tokens_by_line[-1]:
686 734 return 'incomplete', find_last_indent(lines)
687 735
688 736 if tokens_by_line[-1][-1].string == ':':
689 737 # The last line starts a block (e.g. 'if foo:')
690 738 ix = 0
691 739 while tokens_by_line[-1][ix].type in {tokenize.INDENT, tokenize.DEDENT}:
692 740 ix += 1
693 741
694 742 indent = tokens_by_line[-1][ix].start[1]
695 743 return 'incomplete', indent + 4
696 744
697 745 if tokens_by_line[-1][0].line.endswith('\\'):
698 746 return 'incomplete', None
699 747
700 748 # At this point, our checks think the code is complete (or invalid).
701 749 # We'll use codeop.compile_command to check this with the real parser
702 750 try:
703 751 with warnings.catch_warnings():
704 752 warnings.simplefilter('error', SyntaxWarning)
705 753 res = compile_command(''.join(lines), symbol='exec')
706 754 except (SyntaxError, OverflowError, ValueError, TypeError,
707 755 MemoryError, SyntaxWarning):
708 756 return 'invalid', None
709 757 else:
710 758 if res is None:
711 759 return 'incomplete', find_last_indent(lines)
712 760
713 761 if last_token_line and last_token_line[0].type == tokenize.DEDENT:
714 762 if ends_with_newline:
715 763 return 'complete', None
716 764 return 'incomplete', find_last_indent(lines)
717 765
718 766 # If there's a blank line at the end, assume we're ready to execute
719 767 if not lines[-1].strip():
720 768 return 'complete', None
721 769
722 770 return 'complete', None
723 771
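Putting it together, TransformerManager is the entry point the shell uses. The sketch below (import path assumed; expected values derived from the code above) shows a full-cell rewrite and the three check_complete statuses:

from IPython.core.inputtransformer2 import TransformerManager

tm = TransformerManager()

print(tm.transform_cell("a = %ls\nobj?\n"))
# a = get_ipython().run_line_magic('ls', '')
# get_ipython().run_line_magic('pinfo', 'obj')

print(tm.check_complete("for i in range(3):"))  # ('incomplete', 4)
print(tm.check_complete("x = 1\n"))             # ('complete', None)
print(tm.check_complete("x = (1,))"))           # ('invalid', None)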
724 772
725 773 def find_last_indent(lines):
726 774 m = _indent_re.match(lines[-1])
727 775 if not m:
728 776 return 0
729 777 return len(m.group(0).replace('\t', ' '*4))
778
779
780 class MaybeAsyncCompile(Compile):
781 def __init__(self, extra_flags=0):
782 super().__init__()
783 self.flags |= extra_flags
784
785 def __call__(self, *args, **kwds):
786 return compile(*args, **kwds)
787
788
789 class MaybeAsyncCommandCompiler(CommandCompiler):
790 def __init__(self, extra_flags=0):
791 self.compiler = MaybeAsyncCompile(extra_flags=extra_flags)
792
793
794 if (sys.version_info.major, sys.version_info.minor) >= (3, 8):
795 _extra_flags = ast.PyCF_ALLOW_TOP_LEVEL_AWAIT
796 else:
797 _extra_flags = ast.PyCF_ONLY_AST
798
799 compile_command = MaybeAsyncCommandCompiler(extra_flags=_extra_flags)