Merge branch 'main' into latex_rendering_tempdir
Pieter Eendebak
r27727:e41930dc merge
@@ -0,0 +1,40 b''
1 import pytest
2 from IPython.terminal.shortcuts import _apply_autosuggest
3
4 from unittest.mock import Mock
5
6
7 def make_event(text, cursor, suggestion):
8 event = Mock()
9 event.current_buffer = Mock()
10 event.current_buffer.suggestion = Mock()
11 event.current_buffer.cursor_position = cursor
12 event.current_buffer.suggestion.text = suggestion
13 event.current_buffer.document = Mock()
14 event.current_buffer.document.get_end_of_line_position = Mock(return_value=0)
15 event.current_buffer.document.text = text
16 event.current_buffer.document.cursor_position = cursor
17 return event
18
19
20 @pytest.mark.parametrize(
21 "text, cursor, suggestion, called",
22 [
23 ("123456", 6, "123456789", True),
24 ("123456", 3, "123456789", False),
25 ("123456 \n789", 6, "123456789", True),
26 ],
27 )
28 def test_autosuggest_at_EOL(text, cursor, suggestion, called):
29 """
30 Test that autosuggest is only applied at the end of the line.
31 """
32
33 event = make_event(text, cursor, suggestion)
34 event.current_buffer.insert_text = Mock()
35 _apply_autosuggest(event)
36 if called:
37 event.current_buffer.insert_text.assert_called()
38 else:
39 event.current_buffer.insert_text.assert_not_called()
40 # event.current_buffer.document.get_end_of_line_position.assert_called()
@@ -1,34 +1,34 b''
1 1 name: Run MyPy
2 2
3 3 on:
4 4 push:
5 branches: [ master, 7.x]
5 branches: [ main, 7.x]
6 6 pull_request:
7 branches: [ master, 7.x]
7 branches: [ main, 7.x]
8 8
9 9 jobs:
10 10 build:
11 11
12 12 runs-on: ubuntu-latest
13 13 strategy:
14 14 matrix:
15 15 python-version: [3.8]
16 16
17 17 steps:
18 18 - uses: actions/checkout@v2
19 19 - name: Set up Python ${{ matrix.python-version }}
20 20 uses: actions/setup-python@v2
21 21 with:
22 22 python-version: ${{ matrix.python-version }}
23 23 - name: Install dependencies
24 24 run: |
25 25 python -m pip install --upgrade pip
26 26 pip install mypy pyflakes flake8
27 27 - name: Lint with mypy
28 28 run: |
29 29 mypy -p IPython.terminal
30 30 mypy -p IPython.core.magics
31 31 - name: Lint with pyflakes
32 32 run: |
33 33 flake8 IPython/core/magics/script.py
34 34 flake8 IPython/core/magics/packaging.py
@@ -1,40 +1,40 b''
1 1 # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 2 # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 3
4 4 name: Python package
5 5
6 6 on:
7 7 push:
8 branches: [ master, 7.x ]
8 branches: [ main, 7.x ]
9 9 pull_request:
10 branches: [ master, 7.x ]
10 branches: [ main, 7.x ]
11 11
12 12 jobs:
13 13 formatting:
14 14
15 15 runs-on: ubuntu-latest
16 16 timeout-minutes: 5
17 17 strategy:
18 18 matrix:
19 19 python-version: [3.8]
20 20
21 21 steps:
22 22 - uses: actions/checkout@v2
23 23 with:
24 24 fetch-depth: 0
25 25 - name: Set up Python ${{ matrix.python-version }}
26 26 uses: actions/setup-python@v2
27 27 with:
28 28 python-version: ${{ matrix.python-version }}
29 29 - name: Install dependencies
30 30 run: |
31 31 python -m pip install --upgrade pip
32 32 pip install darker black==21.12b0
33 33 - name: Lint with darker
34 34 run: |
35 35 darker -r 60625f241f298b5039cb2debc365db38aa7bb522 --check --diff . || (
36 36 echo "Changes need auto-formatting. Run:"
37 37 echo " darker -r 60625f241f298b5039cb2debc365db38aa7bb522"
38 38 echo "then commit and push changes to fix."
39 39 exit 1
40 40 )
@@ -1,81 +1,80 b''
1 1 name: Run tests
2 2
3 3 on:
4 4 push:
5 5 branches:
6 6 - main
7 - master
8 7 - '*.x'
9 8 pull_request:
10 9 # Run weekly on Monday at 1:23 UTC
11 10 schedule:
12 11 - cron: '23 1 * * 1'
13 12 workflow_dispatch:
14 13
15 14
16 15 jobs:
17 16 test:
18 17 runs-on: ${{ matrix.os }}
19 18 strategy:
20 19 fail-fast: false
21 20 matrix:
22 21 os: [ubuntu-latest, windows-latest]
23 22 python-version: ["3.8", "3.9", "3.10"]
24 23 deps: [test_extra]
25 24 # Test all on ubuntu, test ends on macos
26 25 include:
27 26 - os: macos-latest
28 27 python-version: "3.8"
29 28 deps: test_extra
30 29 - os: macos-latest
31 30 python-version: "3.10"
32 31 deps: test_extra
33 32 # Tests minimal dependencies set
34 33 - os: ubuntu-latest
35 34 python-version: "3.10"
36 35 deps: test
37 36 # Tests latest development Python version
38 37 - os: ubuntu-latest
39 38 python-version: "3.11-dev"
40 39 deps: test
41 40 # Installing optional dependencies stuff takes ages on PyPy
42 41 - os: ubuntu-latest
43 42 python-version: "pypy-3.8"
44 43 deps: test
45 44 - os: windows-latest
46 45 python-version: "pypy-3.8"
47 46 deps: test
48 47 - os: macos-latest
49 48 python-version: "pypy-3.8"
50 49 deps: test
51 50
52 51 steps:
53 52 - uses: actions/checkout@v2
54 53 - name: Set up Python ${{ matrix.python-version }}
55 54 uses: actions/setup-python@v2
56 55 with:
57 56 python-version: ${{ matrix.python-version }}
58 57 cache: pip
59 58 - name: Install latex
60 59 if: runner.os == 'Linux' && matrix.deps == 'test_extra'
61 60 run: echo "disable latex for now, issues in mirrors" #sudo apt-get -yq -o Acquire::Retries=3 --no-install-suggests --no-install-recommends install texlive dvipng
62 61 - name: Install and update Python dependencies
63 62 run: |
64 63 python -m pip install --upgrade pip setuptools wheel build
65 64 python -m pip install --upgrade -e .[${{ matrix.deps }}]
66 65 python -m pip install --upgrade check-manifest pytest-cov
67 66 - name: Try building with Python build
68 67 if: runner.os != 'Windows' # setup.py does not support sdist on Windows
69 68 run: |
70 69 python -m build
71 70 shasum -a 256 dist/*
72 71 - name: Check manifest
73 72 if: runner.os != 'Windows' # setup.py does not support sdist on Windows
74 73 run: check-manifest
75 74 - name: pytest
76 75 env:
77 76 COLUMNS: 120
78 77 run: |
79 78 pytest --color=yes -raXxs ${{ startsWith(matrix.python-version, 'pypy') && ' ' || '--cov --cov-report=xml' }}
80 79 - name: Upload coverage to Codecov
81 80 uses: codecov/codecov-action@v2
@@ -1,90 +1,90 b''
1 1 ## Triaging Issues
2 2
3 3 On the IPython repository, we strive to trust users and give them responsibility.
4 4 By using one of our bots, any user can close issues or add/remove
5 5 labels by mentioning the bot and asking it to do things on their behalf.
6 6
7 7 To close an issue (or PR), even if you did not create it, use the following:
8 8
9 9 > @meeseeksdev close
10 10
11 11 This command can be in the middle of another comment, but must start on its
12 12 own line.
13 13
14 14 To add labels to an issue, ask the bot to `tag` with a comma-separated list of
15 15 tags to add:
16 16
17 17 > @meeseeksdev tag windows, documentation
18 18
19 19 Only tags that already exist can be added. So far, the list is limited to:
20 20 `async/await`, `backported`, `help wanted`, `documentation`, `notebook`,
21 21 `tab-completion`, `windows`
22 22
23 23 To remove a label, use the `untag` command:
24 24
25 25 > @meeseeksdev untag windows, documentation
26 26
27 27 We'll be adding additional capabilities for the bot and will share them here
28 28 when they are ready to be used.
29 29
30 30 ## Opening an Issue
31 31
32 32 When opening a new Issue, please take the following steps:
33 33
34 34 1. Search GitHub and/or Google for your issue to avoid duplicate reports.
35 35 Keyword searches for your error messages are most helpful.
36 2. If possible, try updating to master and reproducing your issue,
36 2. If possible, try updating to main and reproducing your issue,
37 37 because we may have already fixed it.
38 38 3. Try to include a minimal reproducible test case.
39 39 4. Include relevant system information. Start with the output of:
40 40
41 41 python -c "import IPython; print(IPython.sys_info())"
42 42
43 43 And include any relevant package versions, depending on the issue, such as
44 44 matplotlib, numpy, Qt, Qt bindings (PyQt/PySide), tornado, web browser, etc.
45 45
46 46 ## Pull Requests
47 47
48 48 Some guidelines on contributing to IPython:
49 49
50 50 * All work is submitted via Pull Requests.
51 51 * Pull Requests can be submitted as soon as there is code worth discussing.
52 52 Pull Requests track the branch, so you can continue to work after the PR is submitted.
53 53 Review and discussion can begin well before the work is complete,
54 54 and the more discussion the better.
55 55 The worst case is that the PR is closed.
56 * Pull Requests should generally be made against master
56 * Pull Requests should generally be made against main
57 57 * Pull Requests should be tested, if feasible:
58 58 - bugfixes should include regression tests.
59 59 - new behavior should at least get minimal exercise.
60 60 * New features and backwards-incompatible changes should be documented by adding
61 61 a new file to the [pr](docs/source/whatsnew/pr) directory, see [the README.md
62 62 there](docs/source/whatsnew/pr/README.md) for details.
63 63 * Don't make 'cleanup' pull requests just to change code style.
64 64 We don't follow any style guide strictly, and we consider formatting changes
65 65 unnecessary noise.
66 66 If you're making functional changes, you can clean up the specific pieces of
67 67 code you're working on.
68 68
69 69 [Travis](http://travis-ci.org/#!/ipython/ipython) does a pretty good job testing
70 70 IPython and Pull Requests, but it may make sense to manually perform tests,
71 71 particularly for PRs that affect `IPython.parallel` or Windows.
72 72
73 73 For more detailed information, see our [GitHub Workflow](https://github.com/ipython/ipython/wiki/Dev:-GitHub-workflow).
74 74
75 75 ## Running Tests
76 76
77 77 All the tests can be run by using
78 78 ```shell
79 79 pytest
80 80 ```
81 81
82 82 All the tests for a single module (for example **test_alias**) can be run by using the fully qualified path to the module.
83 83 ```shell
84 84 pytest IPython/core/tests/test_alias.py
85 85 ```
86 86
87 87 Only a single test (for example **test_alias_lifecycle**) within a single file can be run by adding the specific test after a `::` at the end:
88 88 ```shell
89 89 pytest IPython/core/tests/test_alias.py::test_alias_lifecycle
90 90 ```
@@ -1,156 +1,156 b''
1 1 """
2 2 IPython: tools for interactive and parallel computing in Python.
3 3
4 4 https://ipython.org
5 5 """
6 6 #-----------------------------------------------------------------------------
7 7 # Copyright (c) 2008-2011, IPython Development Team.
8 8 # Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
9 9 # Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
10 10 # Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
11 11 #
12 12 # Distributed under the terms of the Modified BSD License.
13 13 #
14 14 # The full license is in the file COPYING.txt, distributed with this software.
15 15 #-----------------------------------------------------------------------------
16 16
17 17 #-----------------------------------------------------------------------------
18 18 # Imports
19 19 #-----------------------------------------------------------------------------
20 20
21 21 import os
22 22 import sys
23 23
24 24 #-----------------------------------------------------------------------------
25 25 # Setup everything
26 26 #-----------------------------------------------------------------------------
27 27
28 28 # Don't forget to also update setup.py when this changes!
29 29 if sys.version_info < (3, 8):
30 30 raise ImportError(
31 31 """
32 32 IPython 8+ supports Python 3.8 and above, following NEP 29.
33 33 When using Python 2.7, please install IPython 5.x LTS Long Term Support version.
34 34 Python 3.3 and 3.4 were supported up to IPython 6.x.
35 35 Python 3.5 was supported with IPython 7.0 to 7.9.
36 36 Python 3.6 was supported with IPython up to 7.16.
37 37 Python 3.7 was still supported with the 7.x branch.
38 38
39 39 See IPython `README.rst` file for more information:
40 40
41 https://github.com/ipython/ipython/blob/master/README.rst
41 https://github.com/ipython/ipython/blob/main/README.rst
42 42
43 43 """
44 44 )
45 45
46 46 #-----------------------------------------------------------------------------
47 47 # Setup the top level names
48 48 #-----------------------------------------------------------------------------
49 49
50 50 from .core.getipython import get_ipython
51 51 from .core import release
52 52 from .core.application import Application
53 53 from .terminal.embed import embed
54 54
55 55 from .core.interactiveshell import InteractiveShell
56 56 from .utils.sysinfo import sys_info
57 57 from .utils.frame import extract_module_locals
58 58
59 59 # Release data
60 60 __author__ = '%s <%s>' % (release.author, release.author_email)
61 61 __license__ = release.license
62 62 __version__ = release.version
63 63 version_info = release.version_info
64 64 # list of CVEs that should have been patched in this release.
65 65 # this is informational and should not be relied upon.
66 66 __patched_cves__ = {"CVE-2022-21699"}
67 67
68 68
69 69 def embed_kernel(module=None, local_ns=None, **kwargs):
70 70 """Embed and start an IPython kernel in a given scope.
71 71
72 72 If you don't want the kernel to initialize the namespace
73 73 from the scope of the surrounding function,
74 74 and/or you want to load full IPython configuration,
75 75 you probably want `IPython.start_kernel()` instead.
76 76
77 77 Parameters
78 78 ----------
79 79 module : types.ModuleType, optional
80 80 The module to load into IPython globals (default: caller)
81 81 local_ns : dict, optional
82 82 The namespace to load into IPython user namespace (default: caller)
83 83 **kwargs : various, optional
84 84 Further keyword args are relayed to the IPKernelApp constructor,
85 85 allowing configuration of the Kernel. Will only have an effect
86 86 on the first embed_kernel call for a given process.
87 87 """
88 88
89 89 (caller_module, caller_locals) = extract_module_locals(1)
90 90 if module is None:
91 91 module = caller_module
92 92 if local_ns is None:
93 93 local_ns = caller_locals
94 94
95 95 # Only import .zmq when we really need it
96 96 from ipykernel.embed import embed_kernel as real_embed_kernel
97 97 real_embed_kernel(module=module, local_ns=local_ns, **kwargs)
98 98
99 99 def start_ipython(argv=None, **kwargs):
100 100 """Launch a normal IPython instance (as opposed to embedded)
101 101
102 102 `IPython.embed()` puts a shell in a particular calling scope,
103 103 such as a function or method for debugging purposes,
104 104 which is often not desirable.
105 105
106 106 `start_ipython()` does full, regular IPython initialization,
107 107 including loading startup files, configuration, etc.
108 108 much of which is skipped by `embed()`.
109 109
110 110 This is a public API method, and will survive implementation changes.
111 111
112 112 Parameters
113 113 ----------
114 114 argv : list or None, optional
115 115 If unspecified or None, IPython will parse command-line options from sys.argv.
116 116 To prevent any command-line parsing, pass an empty list: `argv=[]`.
117 117 user_ns : dict, optional
118 118 specify this dictionary to initialize the IPython user namespace with particular values.
119 119 **kwargs : various, optional
120 120 Any other kwargs will be passed to the Application constructor,
121 121 such as `config`.
122 122 """
123 123 from IPython.terminal.ipapp import launch_new_instance
124 124 return launch_new_instance(argv=argv, **kwargs)
125 125
126 126 def start_kernel(argv=None, **kwargs):
127 127 """Launch a normal IPython kernel instance (as opposed to embedded)
128 128
129 129 `IPython.embed_kernel()` puts a shell in a particular calling scope,
130 130 such as a function or method for debugging purposes,
131 131 which is often not desirable.
132 132
133 133 `start_kernel()` does full, regular IPython initialization,
134 134 including loading startup files, configuration, etc.
135 135 much of which is skipped by `embed()`.
136 136
137 137 Parameters
138 138 ----------
139 139 argv : list or None, optional
140 140 If unspecified or None, IPython will parse command-line options from sys.argv.
141 141 To prevent any command-line parsing, pass an empty list: `argv=[]`.
142 142 user_ns : dict, optional
143 143 specify this dictionary to initialize the IPython user namespace with particular values.
144 144 **kwargs : various, optional
145 145 Any other kwargs will be passed to the Application constructor,
146 146 such as `config`.
147 147 """
148 148 import warnings
149 149
150 150 warnings.warn(
151 151 "start_kernel is deprecated since IPython 8.0, use from `ipykernel.kernelapp.launch_new_instance`",
152 152 DeprecationWarning,
153 153 stacklevel=2,
154 154 )
155 155 from ipykernel.kernelapp import launch_new_instance
156 156 return launch_new_instance(argv=argv, **kwargs)
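As an aside on the public API shown in this hunk: the docstrings above describe how `IPython.start_ipython` can be driven programmatically. Below is a minimal sketch based only on the documented parameters (`argv=[]` to skip parsing `sys.argv`, `user_ns` to seed the interactive namespace); the variable names are illustrative, not part of IPython's API:

    import IPython

    def main():
        # Illustrative data we want available inside the interactive session.
        data = {"answer": 42}
        # Launch a full (non-embedded) IPython instance. argv=[] prevents any
        # command-line parsing; user_ns pre-populates the user namespace.
        IPython.start_ipython(argv=[], user_ns={"data": data})

    if __name__ == "__main__":
        main()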
@@ -1,2272 +1,2276 b''
1 1 """Completion for IPython.
2 2
3 3 This module started as a fork of the rlcompleter module in the Python standard
4 4 library. The original enhancements made to rlcompleter have been sent
5 5 upstream and were accepted as of Python 2.3.
6 6
7 7 This module now supports a wide variety of completion mechanisms, both for
8 8 normal classic Python code and for IPython-specific syntax such as
9 9 magics.
10 10
11 11 Latex and Unicode completion
12 12 ============================
13 13
14 14 IPython and compatible frontends not only can complete your code, but can help
15 15 you to input a wide range of characters. In particular we allow you to insert
16 16 a unicode character using the tab completion mechanism.
17 17
18 18 Forward latex/unicode completion
19 19 --------------------------------
20 20
21 21 Forward completion allows you to easily type a unicode character using its latex
22 22 name, or unicode long description. To do so, type a backslash followed by the
23 23 relevant name and press tab:
24 24
25 25
26 26 Using latex completion:
27 27
28 28 .. code::
29 29
30 30 \\alpha<tab>
31 31 Ξ±
32 32
33 33 or using unicode completion:
34 34
35 35
36 36 .. code::
37 37
38 38 \\GREEK SMALL LETTER ALPHA<tab>
39 39 Ξ±
40 40
41 41
42 42 Only valid Python identifiers will complete. Combining characters (like arrow or
43 43 dots) are also available; unlike latex, they need to be put after their
44 44 counterpart, that is to say, ``F\\\\vec<tab>`` is correct, not ``\\\\vec<tab>F``.
45 45
46 46 Some browsers are known to display combining characters incorrectly.
47 47
48 48 Backward latex completion
49 49 -------------------------
50 50
51 51 It is sometimes challenging to know how to type a character; if you are using
52 52 IPython, or any compatible frontend, you can prepend a backslash to the character
53 53 and press ``<tab>`` to expand it to its latex form.
54 54
55 55 .. code::
56 56
57 57 \\Ξ±<tab>
58 58 \\alpha
59 59
60 60
61 61 Both forward and backward completions can be deactivated by setting the
62 62 ``Completer.backslash_combining_completions`` option to ``False``.
63 63
64 64
65 65 Experimental
66 66 ============
67 67
68 68 Starting with IPython 6.0, this module can make use of the Jedi library to
69 69 generate completions both using static analysis of the code, and dynamically
70 70 inspecting multiple namespaces. Jedi is an autocompletion and static analysis
71 71 library for Python. The APIs attached to this new mechanism are unstable and will
72 72 raise unless used in a :any:`provisionalcompleter` context manager.
73 73
74 74 You will find that the following are experimental:
75 75
76 76 - :any:`provisionalcompleter`
77 77 - :any:`IPCompleter.completions`
78 78 - :any:`Completion`
79 79 - :any:`rectify_completions`
80 80
81 81 .. note::
82 82
83 83 better name for :any:`rectify_completions` ?
84 84
85 85 We welcome any feedback on these new APIs, and we also encourage you to try this
86 86 module in debug mode (start IPython with ``--Completer.debug=True``) in order
87 87 to have extra logging information if :any:`jedi` is crashing, or if current
88 88 IPython completer pending deprecations are returning results not yet handled
89 89 by :any:`jedi`.
90 90
91 91 Using Jedi for tab completion allows snippets like the following to work without
92 92 having to execute any code:
93 93
94 94 >>> myvar = ['hello', 42]
95 95 ... myvar[1].bi<tab>
96 96
97 97 Tab completion will be able to infer that ``myvar[1]`` is a real number without
98 98 executing any code, unlike the previously available ``IPCompleter.greedy``
99 99 option.
100 100
101 101 Be sure to update :any:`jedi` to the latest stable version or to try the
102 102 current development version to get better completions.
103 103 """
104 104
105 105
106 106 # Copyright (c) IPython Development Team.
107 107 # Distributed under the terms of the Modified BSD License.
108 108 #
109 109 # Some of this code originated from rlcompleter in the Python standard library
110 110 # Copyright (C) 2001 Python Software Foundation, www.python.org
111 111
112 112
113 113 import builtins as builtin_mod
114 114 import glob
115 115 import inspect
116 116 import itertools
117 117 import keyword
118 118 import os
119 119 import re
120 120 import string
121 121 import sys
122 122 import time
123 123 import unicodedata
124 124 import uuid
125 125 import warnings
126 126 from contextlib import contextmanager
127 127 from importlib import import_module
128 128 from types import SimpleNamespace
129 129 from typing import Iterable, Iterator, List, Tuple, Union, Any, Sequence, Dict, NamedTuple, Pattern, Optional
130 130
131 131 from IPython.core.error import TryNext
132 132 from IPython.core.inputtransformer2 import ESC_MAGIC
133 133 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
134 134 from IPython.core.oinspect import InspectColors
135 135 from IPython.testing.skipdoctest import skip_doctest
136 136 from IPython.utils import generics
137 137 from IPython.utils.dir2 import dir2, get_real_method
138 138 from IPython.utils.path import ensure_dir_exists
139 139 from IPython.utils.process import arg_split
140 140 from traitlets import Bool, Enum, Int, List as ListTrait, Unicode, default, observe
141 141 from traitlets.config.configurable import Configurable
142 142
143 143 import __main__
144 144
145 145 # skip module docstests
146 146 __skip_doctest__ = True
147 147
148 148 try:
149 149 import jedi
150 150 jedi.settings.case_insensitive_completion = False
151 151 import jedi.api.helpers
152 152 import jedi.api.classes
153 153 JEDI_INSTALLED = True
154 154 except ImportError:
155 155 JEDI_INSTALLED = False
156 156 #-----------------------------------------------------------------------------
157 157 # Globals
158 158 #-----------------------------------------------------------------------------
159 159
160 160 # Ranges where we have most of the valid unicode names. We could be finer
161 161 # grained, but is it worth it for performance? While unicode has characters in the
162 162 # range 0, 0x110000, we seem to have names for about 10% of those (131808 as I
163 163 # write this). With the below ranges we cover them all, with a density of ~67%;
164 164 # the biggest next gap we consider would only add about 1% density, and there are
165 165 # 600 gaps that would need hard coding.
166 166 _UNICODE_RANGES = [(32, 0x3134b), (0xe0001, 0xe01f0)]
167 167
168 168 # Public API
169 169 __all__ = ['Completer','IPCompleter']
170 170
171 171 if sys.platform == 'win32':
172 172 PROTECTABLES = ' '
173 173 else:
174 174 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
175 175
176 176 # Protect against returning an enormous number of completions which the frontend
177 177 # may have trouble processing.
178 178 MATCHES_LIMIT = 500
179 179
180 180
181 181 class ProvisionalCompleterWarning(FutureWarning):
182 182 """
183 183 Exception raised by an experimental feature in this module.
184 184
185 185 Wrap code in :any:`provisionalcompleter` context manager if you
186 186 are certain you want to use an unstable feature.
187 187 """
188 188 pass
189 189
190 190 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
191 191
192 192
193 193 @skip_doctest
194 194 @contextmanager
195 195 def provisionalcompleter(action='ignore'):
196 196 """
197 197 This context manager has to be used in any place where unstable completer
198 198 behavior and API may be called.
199 199
200 200 >>> with provisionalcompleter():
201 201 ... completer.do_experimental_things() # works
202 202
203 203 >>> completer.do_experimental_things() # raises.
204 204
205 205 .. note::
206 206
207 207 Unstable
208 208
209 209 By using this context manager you agree that the API in use may change
210 210 without warning, and that you won't complain if it does so.
211 211
212 212 You also understand that, if the API is not to your liking, you should report
213 213 a bug to explain your use case upstream.
214 214
215 215 We'll be happy to get your feedback, feature requests, and improvements on
216 216 any of the unstable APIs!
217 217 """
218 218 with warnings.catch_warnings():
219 219 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
220 220 yield
221 221
222 222
223 223 def has_open_quotes(s):
224 224 """Return whether a string has open quotes.
225 225
226 226 This simply counts whether the number of quote characters of either type in
227 227 the string is odd.
228 228
229 229 Returns
230 230 -------
231 231 If there is an open quote, the quote character is returned. Else, return
232 232 False.
233 233 """
234 234 # We check " first, then ', so complex cases with nested quotes will get
235 235 # the " to take precedence.
236 236 if s.count('"') % 2:
237 237 return '"'
238 238 elif s.count("'") % 2:
239 239 return "'"
240 240 else:
241 241 return False
242 242
243 243
244 244 def protect_filename(s, protectables=PROTECTABLES):
245 245 """Escape a string to protect certain characters."""
246 246 if set(s) & set(protectables):
247 247 if sys.platform == "win32":
248 248 return '"' + s + '"'
249 249 else:
250 250 return "".join(("\\" + c if c in protectables else c) for c in s)
251 251 else:
252 252 return s
253 253
254 254
255 255 def expand_user(path:str) -> Tuple[str, bool, str]:
256 256 """Expand ``~``-style usernames in strings.
257 257
258 258 This is similar to :func:`os.path.expanduser`, but it computes and returns
259 259 extra information that will be useful if the input was being used in
260 260 computing completions, and you wish to return the completions with the
261 261 original '~' instead of its expanded value.
262 262
263 263 Parameters
264 264 ----------
265 265 path : str
266 266 String to be expanded. If no ~ is present, the output is the same as the
267 267 input.
268 268
269 269 Returns
270 270 -------
271 271 newpath : str
272 272 Result of ~ expansion in the input path.
273 273 tilde_expand : bool
274 274 Whether any expansion was performed or not.
275 275 tilde_val : str
276 276 The value that ~ was replaced with.
277 277 """
278 278 # Default values
279 279 tilde_expand = False
280 280 tilde_val = ''
281 281 newpath = path
282 282
283 283 if path.startswith('~'):
284 284 tilde_expand = True
285 285 rest = len(path)-1
286 286 newpath = os.path.expanduser(path)
287 287 if rest:
288 288 tilde_val = newpath[:-rest]
289 289 else:
290 290 tilde_val = newpath
291 291
292 292 return newpath, tilde_expand, tilde_val
293 293
294 294
295 295 def compress_user(path:str, tilde_expand:bool, tilde_val:str) -> str:
296 296 """Does the opposite of expand_user, with its outputs.
297 297 """
298 298 if tilde_expand:
299 299 return path.replace(tilde_val, '~')
300 300 else:
301 301 return path
302 302
303 303
304 304 def completions_sorting_key(word):
305 305 """key for sorting completions
306 306
307 307 This does several things:
308 308
309 309 - Demote any completions starting with underscores to the end
310 310 - Insert any %magic and %%cellmagic completions in the alphabetical order
311 311 by their name
312 312 """
313 313 prio1, prio2 = 0, 0
314 314
315 315 if word.startswith('__'):
316 316 prio1 = 2
317 317 elif word.startswith('_'):
318 318 prio1 = 1
319 319
320 320 if word.endswith('='):
321 321 prio1 = -1
322 322
323 323 if word.startswith('%%'):
324 324 # If there's another % in there, this is something else, so leave it alone
325 325 if not "%" in word[2:]:
326 326 word = word[2:]
327 327 prio2 = 2
328 328 elif word.startswith('%'):
329 329 if not "%" in word[1:]:
330 330 word = word[1:]
331 331 prio2 = 1
332 332
333 333 return prio1, word, prio2
334 334
335 335
336 336 class _FakeJediCompletion:
337 337 """
338 338 This is a workaround to communicate to the UI that Jedi has crashed and to
339 339 report a bug. Will be used only if :any:`IPCompleter.debug` is set to true.
340 340
341 341 Added in IPython 6.0 so should likely be removed for 7.0
342 342
343 343 """
344 344
345 345 def __init__(self, name):
346 346
347 347 self.name = name
348 348 self.complete = name
349 349 self.type = 'crashed'
350 350 self.name_with_symbols = name
351 351 self.signature = ''
352 352 self._origin = 'fake'
353 353
354 354 def __repr__(self):
355 355 return '<Fake completion object jedi has crashed>'
356 356
357 357
358 358 class Completion:
359 359 """
360 360 Completion object used and returned by IPython completers.
361 361
362 362 .. warning::
363 363
364 364 Unstable
365 365
366 366 This function is unstable, API may change without warning.
367 367 It will also raise unless used in the proper context manager.
368 368
369 369 This acts as a middle-ground :any:`Completion` object between the
370 370 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
371 371 object. While Jedi needs a lot of information about the evaluator and how the
372 372 code should be run/inspected, PromptToolkit (and other frontends) mostly
373 373 need user-facing information.
374 374
375 375 - Which range should be replaced by what.
376 376 - Some metadata (like completion type), or meta information to be displayed to
377 377 the user.
378 378
379 379 For debugging purposes we can also store the origin of the completion (``jedi``,
380 380 ``IPython.python_matches``, ``IPython.magics_matches``...).
381 381 """
382 382
383 383 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
384 384
385 385 def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin='', signature='') -> None:
386 386 warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
387 387 "It may change without warnings. "
388 388 "Use in corresponding context manager.",
389 389 category=ProvisionalCompleterWarning, stacklevel=2)
390 390
391 391 self.start = start
392 392 self.end = end
393 393 self.text = text
394 394 self.type = type
395 395 self.signature = signature
396 396 self._origin = _origin
397 397
398 398 def __repr__(self):
399 399 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
400 400 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
401 401
402 402 def __eq__(self, other)->Bool:
403 403 """
404 404 Equality and hash do not hash the type (as some completers may not be
405 405 able to infer the type), but are used to (partially) de-duplicate
406 406 completions.
407 407
408 408 Completely de-duplicating completions is a bit trickier than just
409 409 comparing, as it depends on surrounding text, which Completions are not
410 410 aware of.
411 411 """
412 412 return self.start == other.start and \
413 413 self.end == other.end and \
414 414 self.text == other.text
415 415
416 416 def __hash__(self):
417 417 return hash((self.start, self.end, self.text))
418 418
419 419
420 420 _IC = Iterable[Completion]
421 421
422 422
423 423 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
424 424 """
425 425 Deduplicate a set of completions.
426 426
427 427 .. warning::
428 428
429 429 Unstable
430 430
431 431 This function is unstable, API may change without warning.
432 432
433 433 Parameters
434 434 ----------
435 435 text : str
436 436 text that should be completed.
437 437 completions : Iterator[Completion]
438 438 iterator over the completions to deduplicate
439 439
440 440 Yields
441 441 ------
442 442 `Completions` objects
443 443 Completions coming from multiple sources may be different but end up having
444 444 the same effect when applied to ``text``. If this is the case, this will
445 445 consider completions as equal and only emit the first encountered.
446 446 Not folded into `completions()` yet for debugging purposes, and to detect when
447 447 the IPython completer does return things that Jedi does not, but it should be
448 448 at some point.
449 449 """
450 450 completions = list(completions)
451 451 if not completions:
452 452 return
453 453
454 454 new_start = min(c.start for c in completions)
455 455 new_end = max(c.end for c in completions)
456 456
457 457 seen = set()
458 458 for c in completions:
459 459 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
460 460 if new_text not in seen:
461 461 yield c
462 462 seen.add(new_text)
463 463
464 464
465 465 def rectify_completions(text: str, completions: _IC, *, _debug: bool = False) -> _IC:
466 466 """
467 467 Rectify a set of completions to all have the same ``start`` and ``end``
468 468
469 469 .. warning::
470 470
471 471 Unstable
472 472
473 473 This function is unstable, API may change without warning.
474 474 It will also raise unless used in the proper context manager.
475 475
476 476 Parameters
477 477 ----------
478 478 text : str
479 479 text that should be completed.
480 480 completions : Iterator[Completion]
481 481 iterator over the completions to rectify
482 482 _debug : bool
483 483 Log failed completion
484 484
485 485 Notes
486 486 -----
487 487 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
488 488 the Jupyter Protocol requires them to behave like so. This will readjust
489 489 the completion to have the same ``start`` and ``end`` by padding both
490 490 extremities with surrounding text.
491 491
492 492 During stabilisation this should support a ``_debug`` option to log which
493 493 completions are returned by the IPython completer and not found in Jedi, in
494 494 order to make upstream bug reports.
495 495 """
496 496 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
497 497 "It may change without warnings. "
498 498 "Use in corresponding context manager.",
499 499 category=ProvisionalCompleterWarning, stacklevel=2)
500 500
501 501 completions = list(completions)
502 502 if not completions:
503 503 return
504 504 starts = (c.start for c in completions)
505 505 ends = (c.end for c in completions)
506 506
507 507 new_start = min(starts)
508 508 new_end = max(ends)
509 509
510 510 seen_jedi = set()
511 511 seen_python_matches = set()
512 512 for c in completions:
513 513 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
514 514 if c._origin == 'jedi':
515 515 seen_jedi.add(new_text)
516 516 elif c._origin == 'IPCompleter.python_matches':
517 517 seen_python_matches.add(new_text)
518 518 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
519 519 diff = seen_python_matches.difference(seen_jedi)
520 520 if diff and _debug:
521 521 print('IPython.python matches have extras:', diff)
522 522
523 523
524 524 if sys.platform == 'win32':
525 525 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
526 526 else:
527 527 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
528 528
529 529 GREEDY_DELIMS = ' =\r\n'
530 530
531 531
532 532 class CompletionSplitter(object):
533 533 """An object to split an input line in a manner similar to readline.
534 534
535 535 By having our own implementation, we can expose readline-like completion in
536 536 a uniform manner to all frontends. This object only needs to be given the
537 537 line of text to be split and the cursor position on said line, and it
538 538 returns the 'word' to be completed on at the cursor after splitting the
539 539 entire line.
540 540
541 541 What characters are used as splitting delimiters can be controlled by
542 542 setting the ``delims`` attribute (this is a property that internally
543 543 automatically builds the necessary regular expression)"""
544 544
545 545 # Private interface
546 546
547 547 # A string of delimiter characters. The default value makes sense for
548 548 # IPython's most typical usage patterns.
549 549 _delims = DELIMS
550 550
551 551 # The expression (a normal string) to be compiled into a regular expression
552 552 # for actual splitting. We store it as an attribute mostly for ease of
553 553 # debugging, since this type of code can be so tricky to debug.
554 554 _delim_expr = None
555 555
556 556 # The regular expression that does the actual splitting
557 557 _delim_re = None
558 558
559 559 def __init__(self, delims=None):
560 560 delims = CompletionSplitter._delims if delims is None else delims
561 561 self.delims = delims
562 562
563 563 @property
564 564 def delims(self):
565 565 """Return the string of delimiter characters."""
566 566 return self._delims
567 567
568 568 @delims.setter
569 569 def delims(self, delims):
570 570 """Set the delimiters for line splitting."""
571 571 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
572 572 self._delim_re = re.compile(expr)
573 573 self._delims = delims
574 574 self._delim_expr = expr
575 575
576 576 def split_line(self, line, cursor_pos=None):
577 577 """Split a line of text with a cursor at the given position.
578 578 """
579 579 l = line if cursor_pos is None else line[:cursor_pos]
580 580 return self._delim_re.split(l)[-1]
581 581
582 582
583 583
584 584 class Completer(Configurable):
585 585
586 586 greedy = Bool(False,
587 587 help="""Activate greedy completion
588 588 PENDING DEPRECATION. this is now mostly taken care of with Jedi.
589 589
590 590 This will enable completion on elements of lists, results of function calls, etc.,
591 591 but can be unsafe because the code is actually evaluated on TAB.
592 592 """,
593 593 ).tag(config=True)
594 594
595 595 use_jedi = Bool(default_value=JEDI_INSTALLED,
596 596 help="Experimental: Use Jedi to generate autocompletions. "
597 597 "Default to True if jedi is installed.").tag(config=True)
598 598
599 599 jedi_compute_type_timeout = Int(default_value=400,
600 600 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
601 601 Set to 0 to stop computing types. Non-zero value lower than 100ms may hurt
602 602 performance by preventing jedi to build its cache.
603 603 """).tag(config=True)
604 604
605 605 debug = Bool(default_value=False,
606 606 help='Enable debug for the Completer. Mostly print extra '
607 607 'information for experimental jedi integration.')\
608 608 .tag(config=True)
609 609
610 610 backslash_combining_completions = Bool(True,
611 611 help="Enable unicode completions, e.g. \\alpha<tab> . "
612 612 "Includes completion of latex commands, unicode names, and expanding "
613 613 "unicode characters back to latex commands.").tag(config=True)
614 614
615 615 def __init__(self, namespace=None, global_namespace=None, **kwargs):
616 616 """Create a new completer for the command line.
617 617
618 618 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
619 619
620 620 If unspecified, the default namespace where completions are performed
621 621 is __main__ (technically, __main__.__dict__). Namespaces should be
622 622 given as dictionaries.
623 623
624 624 An optional second namespace can be given. This allows the completer
625 625 to handle cases where both the local and global scopes need to be
626 626 distinguished.
627 627 """
628 628
629 629 # Don't bind to namespace quite yet, but flag whether the user wants a
630 630 # specific namespace or to use __main__.__dict__. This will allow us
631 631 # to bind to __main__.__dict__ at completion time, not now.
632 632 if namespace is None:
633 633 self.use_main_ns = True
634 634 else:
635 635 self.use_main_ns = False
636 636 self.namespace = namespace
637 637
638 638 # The global namespace, if given, can be bound directly
639 639 if global_namespace is None:
640 640 self.global_namespace = {}
641 641 else:
642 642 self.global_namespace = global_namespace
643 643
644 644 self.custom_matchers = []
645 645
646 646 super(Completer, self).__init__(**kwargs)
647 647
648 648 def complete(self, text, state):
649 649 """Return the next possible completion for 'text'.
650 650
651 651 This is called successively with state == 0, 1, 2, ... until it
652 652 returns None. The completion should begin with 'text'.
653 653
654 654 """
655 655 if self.use_main_ns:
656 656 self.namespace = __main__.__dict__
657 657
658 658 if state == 0:
659 659 if "." in text:
660 660 self.matches = self.attr_matches(text)
661 661 else:
662 662 self.matches = self.global_matches(text)
663 663 try:
664 664 return self.matches[state]
665 665 except IndexError:
666 666 return None
667 667
668 668 def global_matches(self, text):
669 669 """Compute matches when text is a simple name.
670 670
671 671 Return a list of all keywords, built-in functions and names currently
672 672 defined in self.namespace or self.global_namespace that match.
673 673
674 674 """
675 675 matches = []
676 676 match_append = matches.append
677 677 n = len(text)
678 for lst in [keyword.kwlist,
679 builtin_mod.__dict__.keys(),
680 self.namespace.keys(),
681 self.global_namespace.keys()]:
678 for lst in [
679 keyword.kwlist,
680 builtin_mod.__dict__.keys(),
681 list(self.namespace.keys()),
682 list(self.global_namespace.keys()),
683 ]:
682 684 for word in lst:
683 685 if word[:n] == text and word != "__builtins__":
684 686 match_append(word)
685 687
686 688 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
687 for lst in [self.namespace.keys(),
688 self.global_namespace.keys()]:
689 shortened = {"_".join([sub[0] for sub in word.split('_')]) : word
690 for word in lst if snake_case_re.match(word)}
689 for lst in [list(self.namespace.keys()), list(self.global_namespace.keys())]:
690 shortened = {
691 "_".join([sub[0] for sub in word.split("_")]): word
692 for word in lst
693 if snake_case_re.match(word)
694 }
691 695 for word in shortened.keys():
692 696 if word[:n] == text and word != "__builtins__":
693 697 match_append(shortened[word])
694 698 return matches
695 699
696 700 def attr_matches(self, text):
697 701 """Compute matches when text contains a dot.
698 702
699 703 Assuming the text is of the form NAME.NAME....[NAME], and is
700 704 evaluatable in self.namespace or self.global_namespace, it will be
701 705 evaluated and its attributes (as revealed by dir()) are used as
702 706 possible completions. (For class instances, class members are
703 707 also considered.)
704 708
705 709 WARNING: this can still invoke arbitrary C code, if an object
706 710 with a __getattr__ hook is evaluated.
707 711
708 712 """
709 713
710 714 # Another option, seems to work great. Catches things like ''.<tab>
711 715 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
712 716
713 717 if m:
714 718 expr, attr = m.group(1, 3)
715 719 elif self.greedy:
716 720 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
717 721 if not m2:
718 722 return []
719 723 expr, attr = m2.group(1,2)
720 724 else:
721 725 return []
722 726
723 727 try:
724 728 obj = eval(expr, self.namespace)
725 729 except:
726 730 try:
727 731 obj = eval(expr, self.global_namespace)
728 732 except:
729 733 return []
730 734
731 735 if self.limit_to__all__ and hasattr(obj, '__all__'):
732 736 words = get__all__entries(obj)
733 737 else:
734 738 words = dir2(obj)
735 739
736 740 try:
737 741 words = generics.complete_object(obj, words)
738 742 except TryNext:
739 743 pass
740 744 except AssertionError:
741 745 raise
742 746 except Exception:
743 747 # Silence errors from completion function
744 748 #raise # dbg
745 749 pass
746 750 # Build match list to return
747 751 n = len(attr)
748 752 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
749 753
750 754
751 755 def get__all__entries(obj):
752 756 """returns the strings in the __all__ attribute"""
753 757 try:
754 758 words = getattr(obj, '__all__')
755 759 except:
756 760 return []
757 761
758 762 return [w for w in words if isinstance(w, str)]
759 763
760 764
761 765 def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes]]]], prefix: str, delims: str,
762 766 extra_prefix: Optional[Tuple[str, bytes]]=None) -> Tuple[str, int, List[str]]:
763 767 """Used by dict_key_matches, matching the prefix to a list of keys
764 768
765 769 Parameters
766 770 ----------
767 771 keys
768 772 list of keys in dictionary currently being completed.
769 773 prefix
770 774 Part of the text already typed by the user. E.g. `mydict[b'fo`
771 775 delims
772 776 String of delimiters to consider when finding the current key.
773 777 extra_prefix : optional
774 778 Part of the text already typed in multi-key index cases. E.g. for
775 779 `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.
776 780
777 781 Returns
778 782 -------
779 783 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
780 784 ``quote`` being the quote that needs to be used to close the current string,
781 785 ``token_start`` the position where the replacement should start occurring, and
782 786 ``matches`` a list of replacements/completions.
783 787
784 788 """
785 789 prefix_tuple = extra_prefix if extra_prefix else ()
786 790 Nprefix = len(prefix_tuple)
787 791 def filter_prefix_tuple(key):
788 792 # Reject too short keys
789 793 if len(key) <= Nprefix:
790 794 return False
791 795 # Reject keys with non str/bytes in it
792 796 for k in key:
793 797 if not isinstance(k, (str, bytes)):
794 798 return False
795 799 # Reject keys that do not match the prefix
796 800 for k, pt in zip(key, prefix_tuple):
797 801 if k != pt:
798 802 return False
799 803 # All checks passed!
800 804 return True
801 805
802 806 filtered_keys:List[Union[str,bytes]] = []
803 807 def _add_to_filtered_keys(key):
804 808 if isinstance(key, (str, bytes)):
805 809 filtered_keys.append(key)
806 810
807 811 for k in keys:
808 812 if isinstance(k, tuple):
809 813 if filter_prefix_tuple(k):
810 814 _add_to_filtered_keys(k[Nprefix])
811 815 else:
812 816 _add_to_filtered_keys(k)
813 817
814 818 if not prefix:
815 819 return '', 0, [repr(k) for k in filtered_keys]
816 820 quote_match = re.search('["\']', prefix)
817 821 assert quote_match is not None # silence mypy
818 822 quote = quote_match.group()
819 823 try:
820 824 prefix_str = eval(prefix + quote, {})
821 825 except Exception:
822 826 return '', 0, []
823 827
824 828 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
825 829 token_match = re.search(pattern, prefix, re.UNICODE)
826 830 assert token_match is not None # silence mypy
827 831 token_start = token_match.start()
828 832 token_prefix = token_match.group()
829 833
830 834 matched:List[str] = []
831 835 for key in filtered_keys:
832 836 try:
833 837 if not key.startswith(prefix_str):
834 838 continue
835 839 except (AttributeError, TypeError, UnicodeError):
836 840 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
837 841 continue
838 842
839 843 # reformat remainder of key to begin with prefix
840 844 rem = key[len(prefix_str):]
841 845 # force repr wrapped in '
842 846 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
843 847 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
844 848 if quote == '"':
845 849 # The entered prefix is quoted with ",
846 850 # but the match is quoted with '.
847 851 # A contained " hence needs escaping for comparison:
848 852 rem_repr = rem_repr.replace('"', '\\"')
849 853
850 854 # then reinsert prefix from start of token
851 855 matched.append('%s%s' % (token_prefix, rem_repr))
852 856 return quote, token_start, matched
853 857
854 858
855 859 def cursor_to_position(text:str, line:int, column:int)->int:
856 860 """
857 861 Convert the (line,column) position of the cursor in text to an offset in a
858 862 string.
859 863
860 864 Parameters
861 865 ----------
862 866 text : str
863 867 The text in which to calculate the cursor offset
864 868 line : int
865 869 Line of the cursor; 0-indexed
866 870 column : int
867 871 Column of the cursor 0-indexed
868 872
869 873 Returns
870 874 -------
871 875 Position of the cursor in ``text``, 0-indexed.
872 876
873 877 See Also
874 878 --------
875 879 position_to_cursor : reciprocal of this function
876 880
877 881 """
878 882 lines = text.split('\n')
879 883 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
880 884
881 885 return sum(len(l) + 1 for l in lines[:line]) + column
882 886
883 887 def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
884 888 """
885 889 Convert the position of the cursor in text (0 indexed) to a line
886 890 number(0-indexed) and a column number (0-indexed) pair
887 891
888 892 Position should be a valid position in ``text``.
889 893
890 894 Parameters
891 895 ----------
892 896 text : str
893 897 The text in which to calculate the cursor offset
894 898 offset : int
895 899 Position of the cursor in ``text``, 0-indexed.
896 900
897 901 Returns
898 902 -------
899 903 (line, column) : (int, int)
900 904 Line of the cursor; 0-indexed, column of the cursor 0-indexed
901 905
902 906 See Also
903 907 --------
904 908 cursor_to_position : reciprocal of this function
905 909
906 910 """
907 911
908 912 assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
909 913
910 914 before = text[:offset]
911 915 blines = before.split('\n') # ! splitlines trims trailing \n
912 916 line = before.count('\n')
913 917 col = len(blines[-1])
914 918 return line, col
915 919
916 920
917 921 def _safe_isinstance(obj, module, class_name):
918 922 """Checks if obj is an instance of module.class_name if loaded
919 923 """
920 924 return (module in sys.modules and
921 925 isinstance(obj, getattr(import_module(module), class_name)))
922 926
923 927 def back_unicode_name_matches(text:str) -> Tuple[str, Sequence[str]]:
924 928 """Match Unicode characters back to Unicode name
925 929
926 930 This does ``β˜ƒ`` -> ``\\snowman``
927 931
928 932 Note that snowman is not a valid python3 combining character but will be expanded.
929 933 Though it will not be recombined back into the snowman character by the completion machinery.
930 934
931 935 Nor will this back-complete standard sequences like \\n, \\b ...
932 936
933 937 Returns
934 938 =======
935 939
936 940 Return a tuple with two elements:
937 941
938 942 - The Unicode character that was matched (preceded with a backslash), or
939 943 empty string,
940 944 - a sequence (of 1) with the name for the matched Unicode character, preceded
941 945 by a backslash, or empty if no match.
942 946
943 947 """
944 948 if len(text)<2:
945 949 return '', ()
946 950 maybe_slash = text[-2]
947 951 if maybe_slash != '\\':
948 952 return '', ()
949 953
950 954 char = text[-1]
951 955 # no expand on quote for completion in strings.
952 956 # nor backcomplete standard ascii keys
953 957 if char in string.ascii_letters or char in ('"',"'"):
954 958 return '', ()
955 959 try :
956 960 unic = unicodedata.name(char)
957 961 return '\\'+char,('\\'+unic,)
958 962 except KeyError:
959 963 pass
960 964 return '', ()
961 965
962 966 def back_latex_name_matches(text:str) -> Tuple[str, Sequence[str]] :
963 967 """Match latex characters back to unicode name
964 968
965 969 This does ``\\β„΅`` -> ``\\aleph``
966 970
967 971 """
968 972 if len(text)<2:
969 973 return '', ()
970 974 maybe_slash = text[-2]
971 975 if maybe_slash != '\\':
972 976 return '', ()
973 977
974 978
975 979 char = text[-1]
976 980 # no expand on quote for completion in strings.
977 981 # nor backcomplete standard ascii keys
978 982 if char in string.ascii_letters or char in ('"',"'"):
979 983 return '', ()
980 984 try :
981 985 latex = reverse_latex_symbol[char]
982 986 # '\\' replace the \ as well
983 987 return '\\'+char,[latex]
984 988 except KeyError:
985 989 pass
986 990 return '', ()
987 991
988 992
989 993 def _formatparamchildren(parameter) -> str:
990 994 """
991 995 Get parameter name and value from Jedi Private API
992 996
993 997 Jedi does not expose a simple way to get `param=value` from its API.
994 998
995 999 Parameters
996 1000 ----------
997 1001 parameter
998 1002 Jedi's function `Param`
999 1003
1000 1004 Returns
1001 1005 -------
1002 1006 A string like 'a', 'b=1', '*args', '**kwargs'
1003 1007
1004 1008 """
1005 1009 description = parameter.description
1006 1010 if not description.startswith('param '):
1007 1011 raise ValueError('Jedi function parameter description have change format.'
1008 1012 'Expected "param ...", found %r".' % description)
1009 1013 return description[6:]
1010 1014
1011 1015 def _make_signature(completion)-> str:
1012 1016 """
1013 1017 Make the signature from a jedi completion
1014 1018
1015 1019 Parameters
1016 1020 ----------
1017 1021 completion : jedi.Completion
1018 1022 object does not complete a function type
1019 1023
1020 1024 Returns
1021 1025 -------
1022 1026 a string consisting of the function signature, with the parenthesis but
1023 1027 without the function name. example:
1024 1028 `(a, *args, b=1, **kwargs)`
1025 1029
1026 1030 """
1027 1031
1028 1032 # it looks like this might work on jedi 0.17
1029 1033 if hasattr(completion, 'get_signatures'):
1030 1034 signatures = completion.get_signatures()
1031 1035 if not signatures:
1032 1036 return '(?)'
1033 1037
1034 1038 c0 = completion.get_signatures()[0]
1035 1039 return '('+c0.to_string().split('(', maxsplit=1)[1]
1036 1040
1037 1041 return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
1038 1042 for p in signature.defined_names()) if f])
1039 1043
1040 1044
1041 1045 class _CompleteResult(NamedTuple):
1042 1046 matched_text : str
1043 1047 matches: Sequence[str]
1044 1048 matches_origin: Sequence[str]
1045 1049 jedi_matches: Any
1046 1050
1047 1051
1048 1052 class IPCompleter(Completer):
1049 1053 """Extension of the completer class with IPython-specific features"""
1050 1054
1051 1055 __dict_key_regexps: Optional[Dict[bool,Pattern]] = None
1052 1056
1053 1057 @observe('greedy')
1054 1058 def _greedy_changed(self, change):
1055 1059 """update the splitter and readline delims when greedy is changed"""
1056 1060 if change['new']:
1057 1061 self.splitter.delims = GREEDY_DELIMS
1058 1062 else:
1059 1063 self.splitter.delims = DELIMS
1060 1064
1061 1065 dict_keys_only = Bool(False,
1062 1066 help="""Whether to show dict key matches only""")
1063 1067
1064 1068 merge_completions = Bool(True,
1065 1069 help="""Whether to merge completion results into a single list
1066 1070
1067 1071 If False, only the completion results from the first non-empty
1068 1072 completer will be returned.
1069 1073 """
1070 1074 ).tag(config=True)
1071 1075 omit__names = Enum((0,1,2), default_value=2,
1072 1076 help="""Instruct the completer to omit private method names
1073 1077
1074 1078 Specifically, when completing on ``object.<tab>``.
1075 1079
1076 1080 When 2 [default]: all names that start with '_' will be excluded.
1077 1081
1078 1082 When 1: all 'magic' names (``__foo__``) will be excluded.
1079 1083
1080 1084 When 0: nothing will be excluded.
1081 1085 """
1082 1086 ).tag(config=True)
1083 1087 limit_to__all__ = Bool(False,
1084 1088 help="""
1085 1089 DEPRECATED as of version 5.0.
1086 1090
1087 1091 Instruct the completer to use __all__ for the completion
1088 1092
1089 1093 Specifically, when completing on ``object.<tab>``.
1090 1094
1091 1095 When True: only those names in obj.__all__ will be included.
1092 1096
1093 1097 When False [default]: the __all__ attribute is ignored
1094 1098 """,
1095 1099 ).tag(config=True)
1096 1100
1097 1101 profile_completions = Bool(
1098 1102 default_value=False,
1099 1103 help="If True, emit profiling data for completion subsystem using cProfile."
1100 1104 ).tag(config=True)
1101 1105
1102 1106 profiler_output_dir = Unicode(
1103 1107 default_value=".completion_profiles",
1104 1108 help="Template for path at which to output profile data for completions."
1105 1109 ).tag(config=True)
1106 1110
1107 1111 @observe('limit_to__all__')
1108 1112 def _limit_to_all_changed(self, change):
1109 1113 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
1110 1114 'value has been deprecated since IPython 5.0, will be made to have '
1111 1115 'no effects and then removed in future version of IPython.',
1112 1116 UserWarning)
1113 1117
1114 1118 def __init__(
1115 1119 self, shell=None, namespace=None, global_namespace=None, config=None, **kwargs
1116 1120 ):
1117 1121 """IPCompleter() -> completer
1118 1122
1119 1123 Return a completer object.
1120 1124
1121 1125 Parameters
1122 1126 ----------
1123 1127 shell
1124 1128 a pointer to the ipython shell itself. This is needed
1125 1129 because this completer knows about magic functions, and those can
1126 1130 only be accessed via the ipython instance.
1127 1131 namespace : dict, optional
1128 1132 an optional dict where completions are performed.
1129 1133 global_namespace : dict, optional
1130 1134 secondary optional dict for completions, to
1131 1135 handle cases (such as IPython embedded inside functions) where
1132 1136 both Python scopes are visible.
1133 1137 config : Config
1134 1138 traitlet's config object
1135 1139 **kwargs
1136 1140 passed to super class unmodified.
1137 1141 """
1138 1142
1139 1143 self.magic_escape = ESC_MAGIC
1140 1144 self.splitter = CompletionSplitter()
1141 1145
1142 1146 # _greedy_changed() depends on splitter and readline being defined:
1143 1147 super().__init__(
1144 1148 namespace=namespace,
1145 1149 global_namespace=global_namespace,
1146 1150 config=config,
1147 1151 **kwargs
1148 1152 )
1149 1153
1150 1154 # List where completion matches will be stored
1151 1155 self.matches = []
1152 1156 self.shell = shell
1153 1157 # Regexp to split filenames with spaces in them
1154 1158 self.space_name_re = re.compile(r'([^\\] )')
1155 1159 # Hold a local ref. to glob.glob for speed
1156 1160 self.glob = glob.glob
1157 1161
1158 1162 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1159 1163 # buffers, to avoid completion problems.
1160 1164 term = os.environ.get('TERM','xterm')
1161 1165 self.dumb_terminal = term in ['dumb','emacs']
1162 1166
1163 1167 # Special handling of backslashes needed in win32 platforms
1164 1168 if sys.platform == "win32":
1165 1169 self.clean_glob = self._clean_glob_win32
1166 1170 else:
1167 1171 self.clean_glob = self._clean_glob
1168 1172
1169 1173 #regexp to parse docstring for function signature
1170 1174 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1171 1175 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1172 1176 #use this if positional argument name is also needed
1173 1177 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1174 1178
1175 1179 self.magic_arg_matchers = [
1176 1180 self.magic_config_matches,
1177 1181 self.magic_color_matches,
1178 1182 ]
1179 1183
1180 1184 # This is set externally by InteractiveShell
1181 1185 self.custom_completers = None
1182 1186
1183 1187 # This is a list of names of unicode characters that can be completed
1184 1188 # into their corresponding unicode value. The list is large, so we
1185 1189 # lazily initialize it on first use. Consuming code should access this
1186 1190 # attribute through the `@unicode_names` property.
1187 1191 self._unicode_names = None
1188 1192
1189 1193 @property
1190 1194 def matchers(self) -> List[Any]:
1191 1195 """All active matcher routines for completion"""
1192 1196 if self.dict_keys_only:
1193 1197 return [self.dict_key_matches]
1194 1198
1195 1199 if self.use_jedi:
1196 1200 return [
1197 1201 *self.custom_matchers,
1198 1202 self.dict_key_matches,
1199 1203 self.file_matches,
1200 1204 self.magic_matches,
1201 1205 ]
1202 1206 else:
1203 1207 return [
1204 1208 *self.custom_matchers,
1205 1209 self.dict_key_matches,
1206 1210 self.python_matches,
1207 1211 self.file_matches,
1208 1212 self.magic_matches,
1209 1213 self.python_func_kw_matches,
1210 1214 ]
1211 1215
1212 1216 def all_completions(self, text:str) -> List[str]:
1213 1217 """
1214 1218 Wrapper around the completion methods for the benefit of emacs.
1215 1219 """
1216 1220 prefix = text.rpartition('.')[0]
1217 1221 with provisionalcompleter():
1218 1222 return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
1219 1223 for c in self.completions(text, len(text))]
1220 1224
1221 1225 return self.complete(text)[1]
1222 1226
1223 1227 def _clean_glob(self, text:str):
1224 1228 return self.glob("%s*" % text)
1225 1229
1226 1230 def _clean_glob_win32(self, text:str):
1227 1231 return [f.replace("\\","/")
1228 1232 for f in self.glob("%s*" % text)]
1229 1233
1230 1234 def file_matches(self, text:str)->List[str]:
1231 1235 """Match filenames, expanding ~USER type strings.
1232 1236
1233 1237 Most of the seemingly convoluted logic in this completer is an
1234 1238 attempt to handle filenames with spaces in them. And yet it's not
1235 1239 quite perfect, because Python's readline doesn't expose all of the
1236 1240 GNU readline details needed for this to be done correctly.
1237 1241
1238 1242 For a filename with a space in it, the printed completions will be
1239 1243 only the parts after what's already been typed (instead of the
1240 1244 full completions, as is normally done). I don't think with the
1241 1245 current (as of Python 2.3) Python readline it's possible to do
1242 1246 better."""
1243 1247
1244 1248 # chars that require escaping with backslash - i.e. chars
1245 1249 # that readline treats incorrectly as delimiters, but we
1246 1250 # don't want to treat as delimiters in filename matching
1247 1251 # when escaped with backslash
1248 1252 if text.startswith('!'):
1249 1253 text = text[1:]
1250 1254 text_prefix = u'!'
1251 1255 else:
1252 1256 text_prefix = u''
1253 1257
1254 1258 text_until_cursor = self.text_until_cursor
1255 1259 # track strings with open quotes
1256 1260 open_quotes = has_open_quotes(text_until_cursor)
1257 1261
1258 1262 if '(' in text_until_cursor or '[' in text_until_cursor:
1259 1263 lsplit = text
1260 1264 else:
1261 1265 try:
1262 1266 # arg_split ~ shlex.split, but with unicode bugs fixed by us
1263 1267 lsplit = arg_split(text_until_cursor)[-1]
1264 1268 except ValueError:
1265 1269 # typically an unmatched ", or backslash without escaped char.
1266 1270 if open_quotes:
1267 1271 lsplit = text_until_cursor.split(open_quotes)[-1]
1268 1272 else:
1269 1273 return []
1270 1274 except IndexError:
1271 1275 # tab pressed on empty line
1272 1276 lsplit = ""
1273 1277
1274 1278 if not open_quotes and lsplit != protect_filename(lsplit):
1275 1279 # if protectables are found, do matching on the whole escaped name
1276 1280 has_protectables = True
1277 1281 text0,text = text,lsplit
1278 1282 else:
1279 1283 has_protectables = False
1280 1284 text = os.path.expanduser(text)
1281 1285
1282 1286 if text == "":
1283 1287 return [text_prefix + protect_filename(f) for f in self.glob("*")]
1284 1288
1285 1289 # Compute the matches from the filesystem
1286 1290 if sys.platform == 'win32':
1287 1291 m0 = self.clean_glob(text)
1288 1292 else:
1289 1293 m0 = self.clean_glob(text.replace('\\', ''))
1290 1294
1291 1295 if has_protectables:
1292 1296 # If we had protectables, we need to revert our changes to the
1293 1297 # beginning of filename so that we don't double-write the part
1294 1298 # of the filename we have so far
1295 1299 len_lsplit = len(lsplit)
1296 1300 matches = [text_prefix + text0 +
1297 1301 protect_filename(f[len_lsplit:]) for f in m0]
1298 1302 else:
1299 1303 if open_quotes:
1300 1304 # if we have a string with an open quote, we don't need to
1301 1305 # protect the names beyond the quote (and we _shouldn't_, as
1302 1306 # it would cause bugs when the filesystem call is made).
1303 1307 matches = m0 if sys.platform == "win32" else\
1304 1308 [protect_filename(f, open_quotes) for f in m0]
1305 1309 else:
1306 1310 matches = [text_prefix +
1307 1311 protect_filename(f) for f in m0]
1308 1312
1309 1313 # Mark directories in input list by appending '/' to their names.
1310 1314 return [x+'/' if os.path.isdir(x) else x for x in matches]
1311 1315
1312 1316 def magic_matches(self, text:str):
1313 1317 """Match magics"""
1314 1318 # Get all shell magics now rather than statically, so magics loaded at
1315 1319 # runtime show up too.
1316 1320 lsm = self.shell.magics_manager.lsmagic()
1317 1321 line_magics = lsm['line']
1318 1322 cell_magics = lsm['cell']
1319 1323 pre = self.magic_escape
1320 1324 pre2 = pre+pre
1321 1325
1322 1326 explicit_magic = text.startswith(pre)
1323 1327
1324 1328 # Completion logic:
1325 1329 # - user gives %%: only do cell magics
1326 1330 # - user gives %: do both line and cell magics
1327 1331 # - no prefix: do both
1328 1332 # In other words, line magics are skipped if the user gives %% explicitly
1329 1333 #
1330 1334 # We also exclude magics that match any currently visible names:
1331 1335 # https://github.com/ipython/ipython/issues/4877, unless the user has
1332 1336 # typed a %:
1333 1337 # https://github.com/ipython/ipython/issues/10754
1334 1338 bare_text = text.lstrip(pre)
1335 1339 global_matches = self.global_matches(bare_text)
1336 1340 if not explicit_magic:
1337 1341 def matches(magic):
1338 1342 """
1339 1343 Filter magics, in particular remove magics that match
1340 1344 a name present in global namespace.
1341 1345 """
1342 1346 return ( magic.startswith(bare_text) and
1343 1347 magic not in global_matches )
1344 1348 else:
1345 1349 def matches(magic):
1346 1350 return magic.startswith(bare_text)
1347 1351
1348 1352 comp = [ pre2+m for m in cell_magics if matches(m)]
1349 1353 if not text.startswith(pre2):
1350 1354 comp += [ pre+m for m in line_magics if matches(m)]
1351 1355
1352 1356 return comp
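
# Illustrative sketch of the logic above (behaviour depends on which magics
# are loaded; assume the defaults): ``magic_matches("%%ti")`` would return
# only cell magics such as ``%%time`` and ``%%timeit``, while
# ``magic_matches("ti")`` would also offer the line magics ``%time`` and
# ``%timeit`` -- unless a name such as ``time`` is already visible in the
# user namespace, in which case the unprefixed magic is filtered out.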
1353 1357
1354 1358 def magic_config_matches(self, text:str) -> List[str]:
1355 1359 """ Match class names and attributes for %config magic """
1356 1360 texts = text.strip().split()
1357 1361
1358 1362 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
1359 1363 # get all configuration classes
1360 1364 classes = sorted(set([ c for c in self.shell.configurables
1361 1365 if c.__class__.class_traits(config=True)
1362 1366 ]), key=lambda x: x.__class__.__name__)
1363 1367 classnames = [ c.__class__.__name__ for c in classes ]
1364 1368
1365 1369 # return all classnames if config or %config is given
1366 1370 if len(texts) == 1:
1367 1371 return classnames
1368 1372
1369 1373 # match classname
1370 1374 classname_texts = texts[1].split('.')
1371 1375 classname = classname_texts[0]
1372 1376 classname_matches = [ c for c in classnames
1373 1377 if c.startswith(classname) ]
1374 1378
1375 1379 # return matched classes or the matched class with attributes
1376 1380 if texts[1].find('.') < 0:
1377 1381 return classname_matches
1378 1382 elif len(classname_matches) == 1 and \
1379 1383 classname_matches[0] == classname:
1380 1384 cls = classes[classnames.index(classname)].__class__
1381 1385 help = cls.class_get_help()
1382 1386 # strip leading '--' from cl-args:
1383 1387 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
1384 1388 return [ attr.split('=')[0]
1385 1389 for attr in help.strip().splitlines()
1386 1390 if attr.startswith(texts[1]) ]
1387 1391 return []
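
# Hedged example (the trait names below are assumptions of this note):
#   magic_config_matches("%config IPC")             -> ["IPCompleter", ...]
#   magic_config_matches("%config IPCompleter.use") -> ["IPCompleter.use_jedi", ...]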
1388 1392
1389 1393 def magic_color_matches(self, text:str) -> List[str] :
1390 1394 """ Match color schemes for %colors magic"""
1391 1395 texts = text.split()
1392 1396 if text.endswith(' '):
1393 1397 # .split() strips off the trailing whitespace. Add '' back
1394 1398 # so that: '%colors ' -> ['%colors', '']
1395 1399 texts.append('')
1396 1400
1397 1401 if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
1398 1402 prefix = texts[1]
1399 1403 return [ color for color in InspectColors.keys()
1400 1404 if color.startswith(prefix) ]
1401 1405 return []
1402 1406
1403 1407 def _jedi_matches(self, cursor_column:int, cursor_line:int, text:str) -> Iterable[Any]:
1404 1408 """
1405 1409 Return a list of :any:`jedi.api.Completions` object from a ``text`` and
1406 1410 cursor position.
1407 1411
1408 1412 Parameters
1409 1413 ----------
1410 1414 cursor_column : int
1411 1415 column position of the cursor in ``text``, 0-indexed.
1412 1416 cursor_line : int
1413 1417 line position of the cursor in ``text``, 0-indexed
1414 1418 text : str
1415 1419 text to complete
1416 1420
1417 1421 Notes
1418 1422 -----
1419 1423 If ``IPCompleter.debug`` is ``True`` may return a :any:`_FakeJediCompletion`
1420 1424 object containing a string with the Jedi debug information attached.
1421 1425 """
1422 1426 namespaces = [self.namespace]
1423 1427 if self.global_namespace is not None:
1424 1428 namespaces.append(self.global_namespace)
1425 1429
1426 1430 completion_filter = lambda x:x
1427 1431 offset = cursor_to_position(text, cursor_line, cursor_column)
1428 1432 # filter output if we are completing for object members
1429 1433 if offset:
1430 1434 pre = text[offset-1]
1431 1435 if pre == '.':
1432 1436 if self.omit__names == 2:
1433 1437 completion_filter = lambda c:not c.name.startswith('_')
1434 1438 elif self.omit__names == 1:
1435 1439 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1436 1440 elif self.omit__names == 0:
1437 1441 completion_filter = lambda x:x
1438 1442 else:
1439 1443 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1440 1444
1441 1445 interpreter = jedi.Interpreter(text[:offset], namespaces)
1442 1446 try_jedi = True
1443 1447
1444 1448 try:
1445 1449 # find the first token in the current tree -- if it is a ' or " then we are in a string
1446 1450 completing_string = False
1447 1451 try:
1448 1452 first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
1449 1453 except StopIteration:
1450 1454 pass
1451 1455 else:
1452 1456 # note the value may be ', ", or it may also be ''' or """, or
1453 1457 # in some cases, """what/you/typed..., but all of these are
1454 1458 # strings.
1455 1459 completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
1456 1460
1457 1461 # if we are in a string jedi is likely not the right candidate for
1458 1462 # now. Skip it.
1459 1463 try_jedi = not completing_string
1460 1464 except Exception as e:
1461 1465 # many things can go wrong; we are using a private API, just don't crash.
1462 1466 if self.debug:
1463 1467 print("Error detecting if completing a non-finished string :", e, '|')
1464 1468
1465 1469 if not try_jedi:
1466 1470 return []
1467 1471 try:
1468 1472 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
1469 1473 except Exception as e:
1470 1474 if self.debug:
1471 1475 return [_FakeJediCompletion('Oops, Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""' % (e))]
1472 1476 else:
1473 1477 return []
1474 1478
1475 1479 def python_matches(self, text:str)->List[str]:
1476 1480 """Match attributes or global python names"""
1477 1481 if "." in text:
1478 1482 try:
1479 1483 matches = self.attr_matches(text)
1480 1484 if text.endswith('.') and self.omit__names:
1481 1485 if self.omit__names == 1:
1482 1486 # true if txt is _not_ a __ name, false otherwise:
1483 1487 no__name = (lambda txt:
1484 1488 re.match(r'.*\.__.*?__',txt) is None)
1485 1489 else:
1486 1490 # true if txt is _not_ a _ name, false otherwise:
1487 1491 no__name = (lambda txt:
1488 1492 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1489 1493 matches = filter(no__name, matches)
1490 1494 except NameError:
1491 1495 # catches <undefined attributes>.<tab>
1492 1496 matches = []
1493 1497 else:
1494 1498 matches = self.global_matches(text)
1495 1499 return matches
1496 1500
1497 1501 def _default_arguments_from_docstring(self, doc):
1498 1502 """Parse the first line of docstring for call signature.
1499 1503
1500 1504 Docstring should be of the form 'min(iterable[, key=func])\n'.
1501 1505 It can also parse cython docstring of the form
1502 1506 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
1503 1507 """
1504 1508 if doc is None:
1505 1509 return []
1506 1510
1507 1511 # care only about the first line
1508 1512 line = doc.lstrip().splitlines()[0]
1509 1513
1510 1514 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1511 1515 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
1512 1516 sig = self.docstring_sig_re.search(line)
1513 1517 if sig is None:
1514 1518 return []
1515 1519 # 'iterable[, key=func]' -> ['iterable[', ' key=func]']
1516 1520 sig = sig.groups()[0].split(',')
1517 1521 ret = []
1518 1522 for s in sig:
1519 1523 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1520 1524 ret += self.docstring_kwd_re.findall(s)
1521 1525 return ret
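
# Worked example, following the docstring above: for
# doc = 'min(iterable[, key=func])\n' the signature regex captures
# 'iterable[, key=func]', splitting on ',' gives ['iterable[', ' key=func]'],
# and the keyword regex then yields ['key'].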
1522 1526
1523 1527 def _default_arguments(self, obj):
1524 1528 """Return the list of default arguments of obj if it is callable,
1525 1529 or empty list otherwise."""
1526 1530 call_obj = obj
1527 1531 ret = []
1528 1532 if inspect.isbuiltin(obj):
1529 1533 pass
1530 1534 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
1531 1535 if inspect.isclass(obj):
1532 1536 #for cython embedsignature=True the constructor docstring
1533 1537 #belongs to the object itself not __init__
1534 1538 ret += self._default_arguments_from_docstring(
1535 1539 getattr(obj, '__doc__', ''))
1536 1540 # for classes, check for __init__,__new__
1537 1541 call_obj = (getattr(obj, '__init__', None) or
1538 1542 getattr(obj, '__new__', None))
1539 1543 # for all others, check if they are __call__able
1540 1544 elif hasattr(obj, '__call__'):
1541 1545 call_obj = obj.__call__
1542 1546 ret += self._default_arguments_from_docstring(
1543 1547 getattr(call_obj, '__doc__', ''))
1544 1548
1545 1549 _keeps = (inspect.Parameter.KEYWORD_ONLY,
1546 1550 inspect.Parameter.POSITIONAL_OR_KEYWORD)
1547 1551
1548 1552 try:
1549 1553 sig = inspect.signature(obj)
1550 1554 ret.extend(k for k, v in sig.parameters.items() if
1551 1555 v.kind in _keeps)
1552 1556 except ValueError:
1553 1557 pass
1554 1558
1555 1559 return list(set(ret))
1556 1560
1557 1561 def python_func_kw_matches(self, text):
1558 1562 """Match named parameters (kwargs) of the last open function"""
1559 1563
1560 1564 if "." in text: # a parameter cannot be dotted
1561 1565 return []
1562 1566 try: regexp = self.__funcParamsRegex
1563 1567 except AttributeError:
1564 1568 regexp = self.__funcParamsRegex = re.compile(r'''
1565 1569 '.*?(?<!\\)' | # single quoted strings or
1566 1570 ".*?(?<!\\)" | # double quoted strings or
1567 1571 \w+ | # identifier
1568 1572 \S # other characters
1569 1573 ''', re.VERBOSE | re.DOTALL)
1570 1574 # 1. find the nearest identifier that comes before an unclosed
1571 1575 # parenthesis before the cursor
1572 1576 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
1573 1577 tokens = regexp.findall(self.text_until_cursor)
1574 1578 iterTokens = reversed(tokens); openPar = 0
1575 1579
1576 1580 for token in iterTokens:
1577 1581 if token == ')':
1578 1582 openPar -= 1
1579 1583 elif token == '(':
1580 1584 openPar += 1
1581 1585 if openPar > 0:
1582 1586 # found the last unclosed parenthesis
1583 1587 break
1584 1588 else:
1585 1589 return []
1586 1590 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
1587 1591 ids = []
1588 1592 isId = re.compile(r'\w+$').match
1589 1593
1590 1594 while True:
1591 1595 try:
1592 1596 ids.append(next(iterTokens))
1593 1597 if not isId(ids[-1]):
1594 1598 ids.pop(); break
1595 1599 if not next(iterTokens) == '.':
1596 1600 break
1597 1601 except StopIteration:
1598 1602 break
1599 1603
1600 1604 # Find all named arguments already assigned to, as to avoid suggesting
1601 1605 # them again
1602 1606 usedNamedArgs = set()
1603 1607 par_level = -1
1604 1608 for token, next_token in zip(tokens, tokens[1:]):
1605 1609 if token == '(':
1606 1610 par_level += 1
1607 1611 elif token == ')':
1608 1612 par_level -= 1
1609 1613
1610 1614 if par_level != 0:
1611 1615 continue
1612 1616
1613 1617 if next_token != '=':
1614 1618 continue
1615 1619
1616 1620 usedNamedArgs.add(token)
1617 1621
1618 1622 argMatches = []
1619 1623 try:
1620 1624 callableObj = '.'.join(ids[::-1])
1621 1625 namedArgs = self._default_arguments(eval(callableObj,
1622 1626 self.namespace))
1623 1627
1624 1628 # Remove used named arguments from the list, no need to show twice
1625 1629 for namedArg in set(namedArgs) - usedNamedArgs:
1626 1630 if namedArg.startswith(text):
1627 1631 argMatches.append("%s=" %namedArg)
1628 1632 except:
1629 1633 pass
1630 1634
1631 1635 return argMatches
1632 1636
1633 1637 @staticmethod
1634 1638 def _get_keys(obj: Any) -> List[Any]:
1635 1639 # Objects can define their own completions by defining an
1636 1640 # _ipython_key_completions_() method.
1637 1641 method = get_real_method(obj, '_ipython_key_completions_')
1638 1642 if method is not None:
1639 1643 return method()
1640 1644
1641 1645 # Special case some common in-memory dict-like types
1642 1646 if isinstance(obj, dict) or\
1643 1647 _safe_isinstance(obj, 'pandas', 'DataFrame'):
1644 1648 try:
1645 1649 return list(obj.keys())
1646 1650 except Exception:
1647 1651 return []
1648 1652 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1649 1653 _safe_isinstance(obj, 'numpy', 'void'):
1650 1654 return obj.dtype.names or []
1651 1655 return []
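
# Minimal sketch of the opt-in protocol checked above (the class below is
# hypothetical; only the method name is part of the contract):
#
#     class Catalog:
#         def __init__(self, data):
#             self._data = dict(data)
#         def __getitem__(self, key):
#             return self._data[key]
#         def _ipython_key_completions_(self):
#             # keys offered when completing ``catalog["<TAB>``
#             return list(self._data)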
1652 1656
1653 1657 def dict_key_matches(self, text:str) -> List[str]:
1654 1658 "Match string keys in a dictionary, after e.g. 'foo[' "
1655 1659
1656 1660
1657 1661 if self.__dict_key_regexps is not None:
1658 1662 regexps = self.__dict_key_regexps
1659 1663 else:
1660 1664 dict_key_re_fmt = r'''(?x)
1661 1665 ( # match dict-referring expression wrt greedy setting
1662 1666 %s
1663 1667 )
1664 1668 \[ # open bracket
1665 1669 \s* # and optional whitespace
1666 1670 # Capture any number of str-like objects (e.g. "a", "b", 'c')
1667 1671 ((?:[uUbB]? # string prefix (r not handled)
1668 1672 (?:
1669 1673 '(?:[^']|(?<!\\)\\')*'
1670 1674 |
1671 1675 "(?:[^"]|(?<!\\)\\")*"
1672 1676 )
1673 1677 \s*,\s*
1674 1678 )*)
1675 1679 ([uUbB]? # string prefix (r not handled)
1676 1680 (?: # unclosed string
1677 1681 '(?:[^']|(?<!\\)\\')*
1678 1682 |
1679 1683 "(?:[^"]|(?<!\\)\\")*
1680 1684 )
1681 1685 )?
1682 1686 $
1683 1687 '''
1684 1688 regexps = self.__dict_key_regexps = {
1685 1689 False: re.compile(dict_key_re_fmt % r'''
1686 1690 # identifiers separated by .
1687 1691 (?!\d)\w+
1688 1692 (?:\.(?!\d)\w+)*
1689 1693 '''),
1690 1694 True: re.compile(dict_key_re_fmt % '''
1691 1695 .+
1692 1696 ''')
1693 1697 }
1694 1698
1695 1699 match = regexps[self.greedy].search(self.text_until_cursor)
1696 1700
1697 1701 if match is None:
1698 1702 return []
1699 1703
1700 1704 expr, prefix0, prefix = match.groups()
1701 1705 try:
1702 1706 obj = eval(expr, self.namespace)
1703 1707 except Exception:
1704 1708 try:
1705 1709 obj = eval(expr, self.global_namespace)
1706 1710 except Exception:
1707 1711 return []
1708 1712
1709 1713 keys = self._get_keys(obj)
1710 1714 if not keys:
1711 1715 return keys
1712 1716
1713 1717 extra_prefix = eval(prefix0) if prefix0 != '' else None
1714 1718
1715 1719 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims, extra_prefix=extra_prefix)
1716 1720 if not matches:
1717 1721 return matches
1718 1722
1719 1723 # get the cursor position of
1720 1724 # - the text being completed
1721 1725 # - the start of the key text
1722 1726 # - the start of the completion
1723 1727 text_start = len(self.text_until_cursor) - len(text)
1724 1728 if prefix:
1725 1729 key_start = match.start(3)
1726 1730 completion_start = key_start + token_offset
1727 1731 else:
1728 1732 key_start = completion_start = match.end()
1729 1733
1730 1734 # grab the leading prefix, to make sure all completions start with `text`
1731 1735 if text_start > key_start:
1732 1736 leading = ''
1733 1737 else:
1734 1738 leading = text[text_start:completion_start]
1735 1739
1736 1740 # the index of the `[` character
1737 1741 bracket_idx = match.end(1)
1738 1742
1739 1743 # append closing quote and bracket as appropriate
1740 1744 # this is *not* appropriate if the opening quote or bracket is outside
1741 1745 # the text given to this method
1742 1746 suf = ''
1743 1747 continuation = self.line_buffer[len(self.text_until_cursor):]
1744 1748 if key_start > text_start and closing_quote:
1745 1749 # quotes were opened inside text, maybe close them
1746 1750 if continuation.startswith(closing_quote):
1747 1751 continuation = continuation[len(closing_quote):]
1748 1752 else:
1749 1753 suf += closing_quote
1750 1754 if bracket_idx > text_start:
1751 1755 # brackets were opened inside text, maybe close them
1752 1756 if not continuation.startswith(']'):
1753 1757 suf += ']'
1754 1758
1755 1759 return [leading + k + suf for k in matches]
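
# Illustrative behaviour (names assumed): with ``d = {'apple': 1, 'apricot': 2}``
# in the user namespace, completing ``d['ap`` should offer 'apple' and
# 'apricot', with the closing quote (and bracket, when it was opened inside
# the completed text) appended as described above.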
1756 1760
1757 1761 @staticmethod
1758 1762 def unicode_name_matches(text:str) -> Tuple[str, List[str]] :
1759 1763 """Match Latex-like syntax for unicode characters base
1760 1764 on the name of the character.
1761 1765
1762 1766 This does ``\\GREEK SMALL LETTER ETA`` -> ``η``
1763 1767
1764 1768 Works only on valid Python 3 identifiers, or on combining characters that
1765 1769 will combine to form a valid identifier.
1766 1770 """
1767 1771 slashpos = text.rfind('\\')
1768 1772 if slashpos > -1:
1769 1773 s = text[slashpos+1:]
1770 1774 try :
1771 1775 unic = unicodedata.lookup(s)
1772 1776 # allow combining chars
1773 1777 if ('a'+unic).isidentifier():
1774 1778 return '\\'+s,[unic]
1775 1779 except KeyError:
1776 1780 pass
1777 1781 return '', []
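
# Example mirroring the docstring:
#   unicode_name_matches('\\GREEK SMALL LETTER ETA')
#   -> ('\\GREEK SMALL LETTER ETA', ['η'])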
1778 1782
1779 1783
1780 1784 def latex_matches(self, text:str) -> Tuple[str, Sequence[str]]:
1781 1785 """Match Latex syntax for unicode characters.
1782 1786
1783 1787 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α``
1784 1788 """
1785 1789 slashpos = text.rfind('\\')
1786 1790 if slashpos > -1:
1787 1791 s = text[slashpos:]
1788 1792 if s in latex_symbols:
1789 1793 # Try to complete a full latex symbol to unicode
1790 1794 # \\alpha -> α
1791 1795 return s, [latex_symbols[s]]
1792 1796 else:
1793 1797 # If a user has partially typed a latex symbol, give them
1794 1798 # a full list of options \al -> [\aleph, \alpha]
1795 1799 matches = [k for k in latex_symbols if k.startswith(s)]
1796 1800 if matches:
1797 1801 return s, matches
1798 1802 return '', ()
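
# Examples mirroring the docstring (the exact candidate list depends on the
# latex_symbols table):
#   latex_matches('\\alpha') -> ('\\alpha', ['α'])
#   latex_matches('\\al')    -> ('\\al', ['\\aleph', '\\alpha', ...])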
1799 1803
1800 1804 def dispatch_custom_completer(self, text):
1801 1805 if not self.custom_completers:
1802 1806 return
1803 1807
1804 1808 line = self.line_buffer
1805 1809 if not line.strip():
1806 1810 return None
1807 1811
1808 1812 # Create a little structure to pass all the relevant information about
1809 1813 # the current completion to any custom completer.
1810 1814 event = SimpleNamespace()
1811 1815 event.line = line
1812 1816 event.symbol = text
1813 1817 cmd = line.split(None,1)[0]
1814 1818 event.command = cmd
1815 1819 event.text_until_cursor = self.text_until_cursor
1816 1820
1817 1821 # for foo etc, try also to find completer for %foo
1818 1822 if not cmd.startswith(self.magic_escape):
1819 1823 try_magic = self.custom_completers.s_matches(
1820 1824 self.magic_escape + cmd)
1821 1825 else:
1822 1826 try_magic = []
1823 1827
1824 1828 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1825 1829 try_magic,
1826 1830 self.custom_completers.flat_matches(self.text_until_cursor)):
1827 1831 try:
1828 1832 res = c(event)
1829 1833 if res:
1830 1834 # first, try case sensitive match
1831 1835 withcase = [r for r in res if r.startswith(text)]
1832 1836 if withcase:
1833 1837 return withcase
1834 1838 # if none, then case insensitive ones are ok too
1835 1839 text_low = text.lower()
1836 1840 return [r for r in res if r.lower().startswith(text_low)]
1837 1841 except TryNext:
1838 1842 pass
1839 1843 except KeyboardInterrupt:
1840 1844 """
1841 1845 If a custom completer takes too long,
1842 1846 let the keyboard interrupt abort it and return nothing.
1843 1847 """
1844 1848 break
1845 1849
1846 1850 return None
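
# Sketch of how a custom completer is typically registered (hook-based API;
# the completer body here is a made-up example):
#
#     def apt_completer(self, event):
#         """Complete sub-commands for ``apt``."""
#         return ['install', 'remove', 'update']
#
#     get_ipython().set_hook('complete_command', apt_completer, str_key='apt')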
1847 1851
1848 1852 def completions(self, text: str, offset: int)->Iterator[Completion]:
1849 1853 """
1850 1854 Returns an iterator over the possible completions
1851 1855
1852 1856 .. warning::
1853 1857
1854 1858 Unstable
1855 1859
1856 1860 This function is unstable, API may change without warning.
1857 1861 It will also raise unless used in the proper context manager.
1858 1862
1859 1863 Parameters
1860 1864 ----------
1861 1865 text : str
1862 1866 Full text of the current input, multi line string.
1863 1867 offset : int
1864 1868 Integer representing the position of the cursor in ``text``. Offset
1865 1869 is 0-based indexed.
1866 1870
1867 1871 Yields
1868 1872 ------
1869 1873 Completion
1870 1874
1871 1875 Notes
1872 1876 -----
1873 1877 The cursor in a text can be seen either as being "in between"
1874 1878 characters or as being "on" a character, depending on the interface
1875 1879 visible to the user. For consistency, the cursor being "in between" characters X
1876 1880 and Y is equivalent to the cursor being "on" character Y, that is to say
1877 1881 the character the cursor is on is considered as being after the cursor.
1878 1882
1879 1883 Combining characters may span more than one position in the
1880 1884 text.
1881 1885
1882 1886 .. note::
1883 1887
1884 1888 If ``IPCompleter.debug`` is :any:`True` will yield a ``--jedi/ipython--``
1885 1889 fake Completion token to distinguish completions returned by Jedi
1886 1890 from usual IPython completions.
1887 1891
1888 1892 .. note::
1889 1893
1890 1894 Completions are not completely deduplicated yet. If identical
1891 1895 completions are coming from different sources this function does not
1892 1896 ensure that each completion object will only be present once.
1893 1897 """
1894 1898 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
1895 1899 "It may change without warnings. "
1896 1900 "Use in corresponding context manager.",
1897 1901 category=ProvisionalCompleterWarning, stacklevel=2)
1898 1902
1899 1903 seen = set()
1900 1904 profiler:Optional[cProfile.Profile]
1901 1905 try:
1902 1906 if self.profile_completions:
1903 1907 import cProfile
1904 1908 profiler = cProfile.Profile()
1905 1909 profiler.enable()
1906 1910 else:
1907 1911 profiler = None
1908 1912
1909 1913 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
1910 1914 if c and (c in seen):
1911 1915 continue
1912 1916 yield c
1913 1917 seen.add(c)
1914 1918 except KeyboardInterrupt:
1915 1919 """if completions take too long and users send keyboard interrupt,
1916 1920 do not crash and return ASAP. """
1917 1921 pass
1918 1922 finally:
1919 1923 if profiler is not None:
1920 1924 profiler.disable()
1921 1925 ensure_dir_exists(self.profiler_output_dir)
1922 1926 output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
1923 1927 print("Writing profiler output to", output_path)
1924 1928 profiler.dump_stats(output_path)
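
# Hedged usage sketch (``ip`` is assumed to be the running InteractiveShell):
# the provisional API must be consumed inside the ``provisionalcompleter``
# context manager, e.g.
#
#     with provisionalcompleter():
#         for comp in ip.Completer.completions('prin', 4):
#             print(comp.text, comp.type)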
1925 1929
1926 1930 def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
1927 1931 """
1928 1932 Core completion method. Same signature as :any:`completions`, with the
1929 1933 extra ``_timeout`` parameter (in seconds).
1930 1934
1931 1935 Computing jedi's completion ``.type`` can be quite expensive (it is a
1932 1936 lazy property) and can require some warm-up, more warm-up than just
1933 1937 computing the ``name`` of a completion. The warm-up can be:
1934 1938
1935 1939 - a long warm-up the first time a module is encountered after
1936 1940 install/update: actually building the parse/inference tree;
1937 1941
1938 1942 - the first time the module is encountered in a session: loading the tree
1939 1943 from disk.
1940 1944
1941 1945 We don't want to block completions for tens of seconds, so we give the
1942 1946 completer a "budget" of ``_timeout`` seconds per invocation to compute
1943 1947 completion types; the completions that have not yet been computed will
1944 1948 be marked as "unknown" and will have a chance to be computed in the next
1945 1949 round, as things get cached.
1946 1950
1947 1951 Keep in mind that Jedi is not the only thing processing the completion, so
1948 1952 keep the timeout short-ish: if we take more than 0.3 seconds we still
1949 1953 have lots of processing to do.
1950 1954
1951 1955 """
1952 1956 deadline = time.monotonic() + _timeout
1953 1957
1954 1958
1955 1959 before = full_text[:offset]
1956 1960 cursor_line, cursor_column = position_to_cursor(full_text, offset)
1957 1961
1958 1962 matched_text, matches, matches_origin, jedi_matches = self._complete(
1959 1963 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
1960 1964
1961 1965 iter_jm = iter(jedi_matches)
1962 1966 if _timeout:
1963 1967 for jm in iter_jm:
1964 1968 try:
1965 1969 type_ = jm.type
1966 1970 except Exception:
1967 1971 if self.debug:
1968 1972 print("Error in Jedi getting type of ", jm)
1969 1973 type_ = None
1970 1974 delta = len(jm.name_with_symbols) - len(jm.complete)
1971 1975 if type_ == 'function':
1972 1976 signature = _make_signature(jm)
1973 1977 else:
1974 1978 signature = ''
1975 1979 yield Completion(start=offset - delta,
1976 1980 end=offset,
1977 1981 text=jm.name_with_symbols,
1978 1982 type=type_,
1979 1983 signature=signature,
1980 1984 _origin='jedi')
1981 1985
1982 1986 if time.monotonic() > deadline:
1983 1987 break
1984 1988
1985 1989 for jm in iter_jm:
1986 1990 delta = len(jm.name_with_symbols) - len(jm.complete)
1987 1991 yield Completion(start=offset - delta,
1988 1992 end=offset,
1989 1993 text=jm.name_with_symbols,
1990 1994 type='<unknown>', # don't compute type for speed
1991 1995 _origin='jedi',
1992 1996 signature='')
1993 1997
1994 1998
1995 1999 start_offset = before.rfind(matched_text)
1996 2000
1997 2001 # TODO:
1998 2002 # Suppress this, right now just for debug.
1999 2003 if jedi_matches and matches and self.debug:
2000 2004 yield Completion(start=start_offset, end=offset, text='--jedi/ipython--',
2001 2005 _origin='debug', type='none', signature='')
2002 2006
2003 2007 # I'm unsure if this is always true, so let's assert and see if it
2004 2008 # crashes
2005 2009 assert before.endswith(matched_text)
2006 2010 for m, t in zip(matches, matches_origin):
2007 2011 yield Completion(start=start_offset, end=offset, text=m, _origin=t, signature='', type='<unknown>')
2008 2012
2009 2013
2010 2014 def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
2011 2015 """Find completions for the given text and line context.
2012 2016
2013 2017 Note that both the text and the line_buffer are optional, but at least
2014 2018 one of them must be given.
2015 2019
2016 2020 Parameters
2017 2021 ----------
2018 2022 text : string, optional
2019 2023 Text to perform the completion on. If not given, the line buffer
2020 2024 is split using the instance's CompletionSplitter object.
2021 2025 line_buffer : string, optional
2022 2026 If not given, the completer attempts to obtain the current line
2023 2027 buffer via readline. This keyword allows clients which are
2024 2028 requesting for text completions in non-readline contexts to inform
2025 2029 the completer of the entire text.
2026 2030 cursor_pos : int, optional
2027 2031 Index of the cursor in the full line buffer. Should be provided by
2028 2032 remote frontends where kernel has no access to frontend state.
2029 2033
2030 2034 Returns
2031 2035 -------
2032 2036 Tuple of two items:
2033 2037 text : str
2034 2038 Text that was actually used in the completion.
2035 2039 matches : list
2036 2040 A list of completion matches.
2037 2041
2038 2042 Notes
2039 2043 -----
2040 2044 This API is likely to be deprecated and replaced by
2041 2045 :any:`IPCompleter.completions` in the future.
2042 2046
2043 2047 """
2044 2048 warnings.warn('`Completer.complete` is pending deprecation since '
2045 2049 'IPython 6.0 and will be replaced by `Completer.completions`.',
2046 2050 PendingDeprecationWarning)
2047 2051 # potential todo, FOLD the 3rd throw away argument of _complete
2048 2052 # into the first 2 one.
2049 2053 return self._complete(line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0)[:2]
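
# Legacy usage sketch (``ip`` assumed to be the running InteractiveShell):
#
#     text, matches = ip.Completer.complete(line_buffer='import o',
#                                           cursor_pos=8)
#
# returning the matched token and the list of candidate completions.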
2050 2054
2051 2055 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
2052 2056 full_text=None) -> _CompleteResult:
2053 2057 """
2054 2058 Like complete but can also return raw jedi completions as well as the
2055 2059 origin of the completion text. This could (and should) be made much
2056 2060 cleaner but that will be simpler once we drop the old (and stateful)
2057 2061 :any:`complete` API.
2058 2062
2059 2063 With the current provisional API, ``cursor_pos`` acts (depending on the
2060 2064 caller) either as the offset in ``text`` or ``line_buffer``, or as the
2061 2065 ``column`` when passing multiline strings. This could/should be renamed,
2062 2066 but that would add extra noise.
2063 2067
2064 2068 Parameters
2065 2069 ----------
2066 2070 cursor_line
2067 2071 Index of the line the cursor is on. 0 indexed.
2068 2072 cursor_pos
2069 2073 Position of the cursor in the current line/line_buffer/text. 0
2070 2074 indexed.
2071 2075 line_buffer : str, optional
2072 2076 The current line the cursor is in; this is mostly here for legacy
2073 2077 reasons, as readline could only give us the single current line.
2074 2078 Prefer `full_text`.
2075 2079 text : str
2076 2080 The current "token" the cursor is in, mostly also for historical
2077 2081 reasons, as the completer would trigger only after the current line
2078 2082 was parsed.
2079 2083 full_text : str
2080 2084 Full text of the current cell.
2081 2085
2082 2086 Returns
2083 2087 -------
2084 2088 A :any:`_CompleteResult` tuple of four elements:
2085 2089 matched_text: the text that the completion matched
2086 2090 matches: list of completion texts
2087 2091 matches_origin: list of the same length as matches, giving where each completion came from
2088 2092 jedi_matches: list of Jedi matches; these have their own structure.
2089 2093 """
2090 2094
2091 2095
2092 2096 # if the cursor position isn't given, the only sane assumption we can
2093 2097 # make is that it's at the end of the line (the common case)
2094 2098 if cursor_pos is None:
2095 2099 cursor_pos = len(line_buffer) if text is None else len(text)
2096 2100
2097 2101 if self.use_main_ns:
2098 2102 self.namespace = __main__.__dict__
2099 2103
2100 2104 # if text is either None or an empty string, rely on the line buffer
2101 2105 if (not line_buffer) and full_text:
2102 2106 line_buffer = full_text.split('\n')[cursor_line]
2103 2107 if not text: # issue #11508: check line_buffer before calling split_line
2104 2108 text = self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ''
2105 2109
2106 2110 if self.backslash_combining_completions:
2107 2111 # allow deactivation of these on windows.
2108 2112 base_text = text if not line_buffer else line_buffer[:cursor_pos]
2109 2113
2110 2114 for meth in (self.latex_matches,
2111 2115 self.unicode_name_matches,
2112 2116 back_latex_name_matches,
2113 2117 back_unicode_name_matches,
2114 2118 self.fwd_unicode_match):
2115 2119 name_text, name_matches = meth(base_text)
2116 2120 if name_text:
2117 2121 return _CompleteResult(name_text, name_matches[:MATCHES_LIMIT], \
2118 2122 [meth.__qualname__]*min(len(name_matches), MATCHES_LIMIT), ())
2119 2123
2120 2124
2121 2125 # If no line buffer is given, assume the input text is all there was
2122 2126 if line_buffer is None:
2123 2127 line_buffer = text
2124 2128
2125 2129 self.line_buffer = line_buffer
2126 2130 self.text_until_cursor = self.line_buffer[:cursor_pos]
2127 2131
2128 2132 # Do magic arg matches
2129 2133 for matcher in self.magic_arg_matchers:
2130 2134 matches = list(matcher(line_buffer))[:MATCHES_LIMIT]
2131 2135 if matches:
2132 2136 origins = [matcher.__qualname__] * len(matches)
2133 2137 return _CompleteResult(text, matches, origins, ())
2134 2138
2135 2139 # Start with a clean slate of completions
2136 2140 matches = []
2137 2141
2138 2142 # FIXME: we should extend our api to return a dict with completions for
2139 2143 # different types of objects. The rlcomplete() method could then
2140 2144 # simply collapse the dict into a list for readline, but we'd have
2141 2145 # richer completion semantics in other environments.
2142 2146 is_magic_prefix = len(text) > 0 and text[0] == "%"
2143 2147 completions: Iterable[Any] = []
2144 2148 if self.use_jedi and not is_magic_prefix:
2145 2149 if not full_text:
2146 2150 full_text = line_buffer
2147 2151 completions = self._jedi_matches(
2148 2152 cursor_pos, cursor_line, full_text)
2149 2153
2150 2154 if self.merge_completions:
2151 2155 matches = []
2152 2156 for matcher in self.matchers:
2153 2157 try:
2154 2158 matches.extend([(m, matcher.__qualname__)
2155 2159 for m in matcher(text)])
2156 2160 except:
2157 2161 # Show the ugly traceback if the matcher causes an
2158 2162 # exception, but do NOT crash the kernel!
2159 2163 sys.excepthook(*sys.exc_info())
2160 2164 else:
2161 2165 for matcher in self.matchers:
2162 2166 matches = [(m, matcher.__qualname__)
2163 2167 for m in matcher(text)]
2164 2168 if matches:
2165 2169 break
2166 2170
2167 2171 seen = set()
2168 2172 filtered_matches = set()
2169 2173 for m in matches:
2170 2174 t, c = m
2171 2175 if t not in seen:
2172 2176 filtered_matches.add(m)
2173 2177 seen.add(t)
2174 2178
2175 2179 _filtered_matches = sorted(filtered_matches, key=lambda x: completions_sorting_key(x[0]))
2176 2180
2177 2181 custom_res = [(m, 'custom') for m in self.dispatch_custom_completer(text) or []]
2178 2182
2179 2183 _filtered_matches = custom_res or _filtered_matches
2180 2184
2181 2185 _filtered_matches = _filtered_matches[:MATCHES_LIMIT]
2182 2186 _matches = [m[0] for m in _filtered_matches]
2183 2187 origins = [m[1] for m in _filtered_matches]
2184 2188
2185 2189 self.matches = _matches
2186 2190
2187 2191 return _CompleteResult(text, _matches, origins, completions)
2188 2192
2189 2193 def fwd_unicode_match(self, text:str) -> Tuple[str, Sequence[str]]:
2190 2194 """
2191 2195 Forward match a string starting with a backslash with a list of
2192 2196 potential Unicode completions.
2193 2197
2194 2198 Will compute the list of Unicode character names on first call and cache it.
2195 2199
2196 2200 Returns
2197 2201 -------
2198 2202 A tuple with:
2199 2203 - matched text (empty if no matches)
2200 2204 - list of potential completions (empty tuple if no matches)
2201 2205 """
2202 2206 # TODO: self.unicode_names is a list of ~100k elements that we traverse on each call.
2203 2207 # We could do a faster match using a Trie.
2204 2208
2205 2209 # Using pygtrie the following seem to work:
2206 2210
2207 2211 # s = PrefixSet()
2208 2212
2209 2213 # for c in range(0,0x10FFFF + 1):
2210 2214 # try:
2211 2215 # s.add(unicodedata.name(chr(c)))
2212 2216 # except ValueError:
2213 2217 # pass
2214 2218 # [''.join(k) for k in s.iter(prefix)]
2215 2219
2216 2220 # But need to be timed and adds an extra dependency.
2217 2221
2218 2222 slashpos = text.rfind('\\')
2219 2223 # if text starts with slash
2220 2224 if slashpos > -1:
2221 2225 # PERF: It's important that we don't access self._unicode_names
2222 2226 # until we're inside this if-block. _unicode_names is lazily
2223 2227 # initialized, and it takes a user-noticeable amount of time to
2224 2228 # initialize it, so we don't want to initialize it unless we're
2225 2229 # actually going to use it.
2226 2230 s = text[slashpos + 1 :]
2227 2231 sup = s.upper()
2228 2232 candidates = [x for x in self.unicode_names if x.startswith(sup)]
2229 2233 if candidates:
2230 2234 return s, candidates
2231 2235 candidates = [x for x in self.unicode_names if sup in x]
2232 2236 if candidates:
2233 2237 return s, candidates
2234 2238 splitsup = sup.split(" ")
2235 2239 candidates = [
2236 2240 x for x in self.unicode_names if all(u in x for u in splitsup)
2237 2241 ]
2238 2242 if candidates:
2239 2243 return s, candidates
2240 2244
2241 2245 return "", ()
2242 2246
2243 2247 # if text does not start with slash
2244 2248 else:
2245 2249 return '', ()
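
# Illustration (actual candidates depend on the Unicode database):
#   fwd_unicode_match('\\GREEK SM') -> ('GREEK SM', ['GREEK SMALL LETTER ALPHA', ...])
#   fwd_unicode_match('no slash')   -> ('', ())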
2246 2250
2247 2251 @property
2248 2252 def unicode_names(self) -> List[str]:
2249 2253 """List of names of unicode code points that can be completed.
2250 2254
2251 2255 The list is lazily initialized on first access.
2252 2256 """
2253 2257 if self._unicode_names is None:
2254 2258 names = []
2255 2259 for c in range(0,0x10FFFF + 1):
2256 2260 try:
2257 2261 names.append(unicodedata.name(chr(c)))
2258 2262 except ValueError:
2259 2263 pass
2260 2264 self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
2261 2265
2262 2266 return self._unicode_names
2263 2267
2264 2268 def _unicode_name_compute(ranges:List[Tuple[int,int]]) -> List[str]:
2265 2269 names = []
2266 2270 for start,stop in ranges:
2267 2271 for c in range(start, stop) :
2268 2272 try:
2269 2273 names.append(unicodedata.name(chr(c)))
2270 2274 except ValueError:
2271 2275 pass
2272 2276 return names
@@ -1,536 +1,539 b''
1 1 # coding: utf-8
2 2 """Tests for IPython.lib.pretty."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7
8 8 from collections import Counter, defaultdict, deque, OrderedDict, UserList
9 9 import os
10 10 import pytest
11 11 import types
12 12 import string
13 13 import sys
14 14 import unittest
15 15
16 16 import pytest
17 17
18 18 from IPython.lib import pretty
19 19
20 20 from io import StringIO
21 21
22 22
23 23 class MyList(object):
24 24 def __init__(self, content):
25 25 self.content = content
26 26 def _repr_pretty_(self, p, cycle):
27 27 if cycle:
28 28 p.text("MyList(...)")
29 29 else:
30 30 with p.group(3, "MyList(", ")"):
31 31 for (i, child) in enumerate(self.content):
32 32 if i:
33 33 p.text(",")
34 34 p.breakable()
35 35 else:
36 36 p.breakable("")
37 37 p.pretty(child)
38 38
39 39
40 40 class MyDict(dict):
41 41 def _repr_pretty_(self, p, cycle):
42 42 p.text("MyDict(...)")
43 43
44 44 class MyObj(object):
45 45 def somemethod(self):
46 46 pass
47 47
48 48
49 49 class Dummy1(object):
50 50 def _repr_pretty_(self, p, cycle):
51 51 p.text("Dummy1(...)")
52 52
53 53 class Dummy2(Dummy1):
54 54 _repr_pretty_ = None
55 55
56 56 class NoModule(object):
57 57 pass
58 58
59 59 NoModule.__module__ = None
60 60
61 61 class Breaking(object):
62 62 def _repr_pretty_(self, p, cycle):
63 63 with p.group(4,"TG: ",":"):
64 64 p.text("Breaking(")
65 65 p.break_()
66 66 p.text(")")
67 67
68 68 class BreakingRepr(object):
69 69 def __repr__(self):
70 70 return "Breaking(\n)"
71 71
72 72 class BadRepr(object):
73 73 def __repr__(self):
74 74 return 1/0
75 75
76 76
77 77 def test_indentation():
78 78 """Test correct indentation in groups"""
79 79 count = 40
80 80 gotoutput = pretty.pretty(MyList(range(count)))
81 81 expectedoutput = "MyList(\n" + ",\n".join(" %d" % i for i in range(count)) + ")"
82 82
83 83 assert gotoutput == expectedoutput
84 84
85 85
86 86 def test_dispatch():
87 87 """
88 88 Test correct dispatching: The _repr_pretty_ method for MyDict
89 89 must be found before the registered printer for dict.
90 90 """
91 91 gotoutput = pretty.pretty(MyDict())
92 92 expectedoutput = "MyDict(...)"
93 93
94 94 assert gotoutput == expectedoutput
95 95
96 96
97 97 def test_callability_checking():
98 98 """
99 99 Test that the _repr_pretty_ method is tested for callability and skipped if
100 100 not.
101 101 """
102 102 gotoutput = pretty.pretty(Dummy2())
103 103 expectedoutput = "Dummy1(...)"
104 104
105 105 assert gotoutput == expectedoutput
106 106
107 107
108 108 @pytest.mark.parametrize(
109 109 "obj,expected_output",
110 110 zip(
111 111 [
112 112 set(),
113 113 frozenset(),
114 114 set([1]),
115 115 frozenset([1]),
116 116 set([1, 2]),
117 117 frozenset([1, 2]),
118 118 set([-1, -2, -3]),
119 119 ],
120 120 [
121 121 "set()",
122 122 "frozenset()",
123 123 "{1}",
124 124 "frozenset({1})",
125 125 "{1, 2}",
126 126 "frozenset({1, 2})",
127 127 "{-3, -2, -1}",
128 128 ],
129 129 ),
130 130 )
131 131 def test_sets(obj, expected_output):
132 132 """
133 133 Test that set and frozenset use Python 3 formatting.
134 134 """
135 135 got_output = pretty.pretty(obj)
136 136 assert got_output == expected_output
137 137
138 138
139 139 def test_pprint_heap_allocated_type():
140 140 """
141 141 Test that pprint works for heap allocated types.
142 142 """
143 143 module_name = "xxlimited" if sys.version_info < (3, 10) else "xxlimited_35"
144 expected_output = (
145 "xxlimited.Null" if sys.version_info < (3, 10, 6) else "xxlimited_35.Null"
146 )
144 147 xxlimited = pytest.importorskip(module_name)
145 148 output = pretty.pretty(xxlimited.Null)
146 assert output == "xxlimited.Null"
149 assert output == expected_output
147 150
148 151
149 152 def test_pprint_nomod():
150 153 """
151 154 Test that pprint works for classes with no __module__.
152 155 """
153 156 output = pretty.pretty(NoModule)
154 157 assert output == "NoModule"
155 158
156 159
157 160 def test_pprint_break():
158 161 """
159 162 Test that p.break_ produces expected output
160 163 """
161 164 output = pretty.pretty(Breaking())
162 165 expected = "TG: Breaking(\n ):"
163 166 assert output == expected
164 167
165 168 def test_pprint_break_repr():
166 169 """
167 170 Test that p.break_ is used in repr
168 171 """
169 172 output = pretty.pretty([[BreakingRepr()]])
170 173 expected = "[[Breaking(\n )]]"
171 174 assert output == expected
172 175
173 176 output = pretty.pretty([[BreakingRepr()]*2])
174 177 expected = "[[Breaking(\n ),\n Breaking(\n )]]"
175 178 assert output == expected
176 179
177 180 def test_bad_repr():
178 181 """Don't catch bad repr errors"""
179 182 with pytest.raises(ZeroDivisionError):
180 183 pretty.pretty(BadRepr())
181 184
182 185 class BadException(Exception):
183 186 def __str__(self):
184 187 return -1
185 188
186 189 class ReallyBadRepr(object):
187 190 __module__ = 1
188 191 @property
189 192 def __class__(self):
190 193 raise ValueError("I am horrible")
191 194
192 195 def __repr__(self):
193 196 raise BadException()
194 197
195 198 def test_really_bad_repr():
196 199 with pytest.raises(BadException):
197 200 pretty.pretty(ReallyBadRepr())
198 201
199 202
200 203 class SA(object):
201 204 pass
202 205
203 206 class SB(SA):
204 207 pass
205 208
206 209 class TestsPretty(unittest.TestCase):
207 210
208 211 def test_super_repr(self):
209 212 # "<super: module_name.SA, None>"
210 213 output = pretty.pretty(super(SA))
211 214 self.assertRegex(output, r"<super: \S+.SA, None>")
212 215
213 216 # "<super: module_name.SA, <module_name.SB at 0x...>>"
214 217 sb = SB()
215 218 output = pretty.pretty(super(SA, sb))
216 219 self.assertRegex(output, r"<super: \S+.SA,\s+<\S+.SB at 0x\S+>>")
217 220
218 221
219 222 def test_long_list(self):
220 223 lis = list(range(10000))
221 224 p = pretty.pretty(lis)
222 225 last2 = p.rsplit('\n', 2)[-2:]
223 226 self.assertEqual(last2, [' 999,', ' ...]'])
224 227
225 228 def test_long_set(self):
226 229 s = set(range(10000))
227 230 p = pretty.pretty(s)
228 231 last2 = p.rsplit('\n', 2)[-2:]
229 232 self.assertEqual(last2, [' 999,', ' ...}'])
230 233
231 234 def test_long_tuple(self):
232 235 tup = tuple(range(10000))
233 236 p = pretty.pretty(tup)
234 237 last2 = p.rsplit('\n', 2)[-2:]
235 238 self.assertEqual(last2, [' 999,', ' ...)'])
236 239
237 240 def test_long_dict(self):
238 241 d = { n:n for n in range(10000) }
239 242 p = pretty.pretty(d)
240 243 last2 = p.rsplit('\n', 2)[-2:]
241 244 self.assertEqual(last2, [' 999: 999,', ' ...}'])
242 245
243 246 def test_unbound_method(self):
244 247 output = pretty.pretty(MyObj.somemethod)
245 248 self.assertIn('MyObj.somemethod', output)
246 249
247 250
248 251 class MetaClass(type):
249 252 def __new__(cls, name):
250 253 return type.__new__(cls, name, (object,), {'name': name})
251 254
252 255 def __repr__(self):
253 256 return "[CUSTOM REPR FOR CLASS %s]" % self.name
254 257
255 258
256 259 ClassWithMeta = MetaClass('ClassWithMeta')
257 260
258 261
259 262 def test_metaclass_repr():
260 263 output = pretty.pretty(ClassWithMeta)
261 264 assert output == "[CUSTOM REPR FOR CLASS ClassWithMeta]"
262 265
263 266
264 267 def test_unicode_repr():
265 268 u = u"üniçodé"
266 269 ustr = u
267 270
268 271 class C(object):
269 272 def __repr__(self):
270 273 return ustr
271 274
272 275 c = C()
273 276 p = pretty.pretty(c)
274 277 assert p == u
275 278 p = pretty.pretty([c])
276 279 assert p == "[%s]" % u
277 280
278 281
279 282 def test_basic_class():
280 283 def type_pprint_wrapper(obj, p, cycle):
281 284 if obj is MyObj:
282 285 type_pprint_wrapper.called = True
283 286 return pretty._type_pprint(obj, p, cycle)
284 287 type_pprint_wrapper.called = False
285 288
286 289 stream = StringIO()
287 290 printer = pretty.RepresentationPrinter(stream)
288 291 printer.type_pprinters[type] = type_pprint_wrapper
289 292 printer.pretty(MyObj)
290 293 printer.flush()
291 294 output = stream.getvalue()
292 295
293 296 assert output == "%s.MyObj" % __name__
294 297 assert type_pprint_wrapper.called is True
295 298
296 299
297 300 def test_collections_userlist():
298 301 # Create userlist with cycle
299 302 a = UserList()
300 303 a.append(a)
301 304
302 305 cases = [
303 306 (UserList(), "UserList([])"),
304 307 (
305 308 UserList(i for i in range(1000, 1020)),
306 309 "UserList([1000,\n"
307 310 " 1001,\n"
308 311 " 1002,\n"
309 312 " 1003,\n"
310 313 " 1004,\n"
311 314 " 1005,\n"
312 315 " 1006,\n"
313 316 " 1007,\n"
314 317 " 1008,\n"
315 318 " 1009,\n"
316 319 " 1010,\n"
317 320 " 1011,\n"
318 321 " 1012,\n"
319 322 " 1013,\n"
320 323 " 1014,\n"
321 324 " 1015,\n"
322 325 " 1016,\n"
323 326 " 1017,\n"
324 327 " 1018,\n"
325 328 " 1019])",
326 329 ),
327 330 (a, "UserList([UserList(...)])"),
328 331 ]
329 332 for obj, expected in cases:
330 333 assert pretty.pretty(obj) == expected
331 334
332 335
333 336 # TODO : pytest.mark.parametrise once nose is gone.
334 337 def test_collections_defaultdict():
335 338 # Create defaultdicts with cycles
336 339 a = defaultdict()
337 340 a.default_factory = a
338 341 b = defaultdict(list)
339 342 b['key'] = b
340 343
341 344 # Dictionary order cannot be relied on, test against single keys.
342 345 cases = [
343 346 (defaultdict(list), 'defaultdict(list, {})'),
344 347 (defaultdict(list, {'key': '-' * 50}),
345 348 "defaultdict(list,\n"
346 349 " {'key': '--------------------------------------------------'})"),
347 350 (a, 'defaultdict(defaultdict(...), {})'),
348 351 (b, "defaultdict(list, {'key': defaultdict(...)})"),
349 352 ]
350 353 for obj, expected in cases:
351 354 assert pretty.pretty(obj) == expected
352 355
353 356
354 357 # TODO : pytest.mark.parametrise once nose is gone.
355 358 def test_collections_ordereddict():
356 359 # Create OrderedDict with cycle
357 360 a = OrderedDict()
358 361 a['key'] = a
359 362
360 363 cases = [
361 364 (OrderedDict(), 'OrderedDict()'),
362 365 (OrderedDict((i, i) for i in range(1000, 1010)),
363 366 'OrderedDict([(1000, 1000),\n'
364 367 ' (1001, 1001),\n'
365 368 ' (1002, 1002),\n'
366 369 ' (1003, 1003),\n'
367 370 ' (1004, 1004),\n'
368 371 ' (1005, 1005),\n'
369 372 ' (1006, 1006),\n'
370 373 ' (1007, 1007),\n'
371 374 ' (1008, 1008),\n'
372 375 ' (1009, 1009)])'),
373 376 (a, "OrderedDict([('key', OrderedDict(...))])"),
374 377 ]
375 378 for obj, expected in cases:
376 379 assert pretty.pretty(obj) == expected
377 380
378 381
379 382 # TODO : pytest.mark.parametrise once nose is gone.
380 383 def test_collections_deque():
381 384 # Create deque with cycle
382 385 a = deque()
383 386 a.append(a)
384 387
385 388 cases = [
386 389 (deque(), 'deque([])'),
387 390 (deque(i for i in range(1000, 1020)),
388 391 'deque([1000,\n'
389 392 ' 1001,\n'
390 393 ' 1002,\n'
391 394 ' 1003,\n'
392 395 ' 1004,\n'
393 396 ' 1005,\n'
394 397 ' 1006,\n'
395 398 ' 1007,\n'
396 399 ' 1008,\n'
397 400 ' 1009,\n'
398 401 ' 1010,\n'
399 402 ' 1011,\n'
400 403 ' 1012,\n'
401 404 ' 1013,\n'
402 405 ' 1014,\n'
403 406 ' 1015,\n'
404 407 ' 1016,\n'
405 408 ' 1017,\n'
406 409 ' 1018,\n'
407 410 ' 1019])'),
408 411 (a, 'deque([deque(...)])'),
409 412 ]
410 413 for obj, expected in cases:
411 414 assert pretty.pretty(obj) == expected
412 415
413 416
414 417 # TODO : pytest.mark.parametrise once nose is gone.
415 418 def test_collections_counter():
416 419 class MyCounter(Counter):
417 420 pass
418 421 cases = [
419 422 (Counter(), 'Counter()'),
420 423 (Counter(a=1), "Counter({'a': 1})"),
421 424 (MyCounter(a=1), "MyCounter({'a': 1})"),
422 425 ]
423 426 for obj, expected in cases:
424 427 assert pretty.pretty(obj) == expected
425 428
426 429 # TODO : pytest.mark.parametrise once nose is gone.
427 430 def test_mappingproxy():
428 431 MP = types.MappingProxyType
429 432 underlying_dict = {}
430 433 mp_recursive = MP(underlying_dict)
431 434 underlying_dict[2] = mp_recursive
432 435 underlying_dict[3] = underlying_dict
433 436
434 437 cases = [
435 438 (MP({}), "mappingproxy({})"),
436 439 (MP({None: MP({})}), "mappingproxy({None: mappingproxy({})})"),
437 440 (MP({k: k.upper() for k in string.ascii_lowercase}),
438 441 "mappingproxy({'a': 'A',\n"
439 442 " 'b': 'B',\n"
440 443 " 'c': 'C',\n"
441 444 " 'd': 'D',\n"
442 445 " 'e': 'E',\n"
443 446 " 'f': 'F',\n"
444 447 " 'g': 'G',\n"
445 448 " 'h': 'H',\n"
446 449 " 'i': 'I',\n"
447 450 " 'j': 'J',\n"
448 451 " 'k': 'K',\n"
449 452 " 'l': 'L',\n"
450 453 " 'm': 'M',\n"
451 454 " 'n': 'N',\n"
452 455 " 'o': 'O',\n"
453 456 " 'p': 'P',\n"
454 457 " 'q': 'Q',\n"
455 458 " 'r': 'R',\n"
456 459 " 's': 'S',\n"
457 460 " 't': 'T',\n"
458 461 " 'u': 'U',\n"
459 462 " 'v': 'V',\n"
460 463 " 'w': 'W',\n"
461 464 " 'x': 'X',\n"
462 465 " 'y': 'Y',\n"
463 466 " 'z': 'Z'})"),
464 467 (mp_recursive, "mappingproxy({2: {...}, 3: {2: {...}, 3: {...}}})"),
465 468 (underlying_dict,
466 469 "{2: mappingproxy({2: {...}, 3: {...}}), 3: {...}}"),
467 470 ]
468 471 for obj, expected in cases:
469 472 assert pretty.pretty(obj) == expected
470 473
471 474
472 475 # TODO : pytest.mark.parametrize once nose is gone.
473 476 def test_simplenamespace():
474 477 SN = types.SimpleNamespace
475 478
476 479 sn_recursive = SN()
477 480 sn_recursive.first = sn_recursive
478 481 sn_recursive.second = sn_recursive
479 482 cases = [
480 483 (SN(), "namespace()"),
481 484 (SN(x=SN()), "namespace(x=namespace())"),
482 485 (SN(a_long_name=[SN(s=string.ascii_lowercase)]*3, a_short_name=None),
483 486 "namespace(a_long_name=[namespace(s='abcdefghijklmnopqrstuvwxyz'),\n"
484 487 " namespace(s='abcdefghijklmnopqrstuvwxyz'),\n"
485 488 " namespace(s='abcdefghijklmnopqrstuvwxyz')],\n"
486 489 " a_short_name=None)"),
487 490 (sn_recursive, "namespace(first=namespace(...), second=namespace(...))"),
488 491 ]
489 492 for obj, expected in cases:
490 493 assert pretty.pretty(obj) == expected
491 494
492 495
493 496 def test_pretty_environ():
494 497 dict_repr = pretty.pretty(dict(os.environ))
495 498 # reindent to align with 'environ' prefix
496 499 dict_indented = dict_repr.replace('\n', '\n' + (' ' * len('environ')))
497 500 env_repr = pretty.pretty(os.environ)
498 501 assert env_repr == "environ" + dict_indented
499 502
500 503
501 504 def test_function_pretty():
502 505 "Test pretty print of function"
503 506 # posixpath is a pure python module, its interface is consistent
504 507 # across Python distributions
505 508 import posixpath
506 509
507 510 assert pretty.pretty(posixpath.join) == "<function posixpath.join(a, *p)>"
508 511
509 512 # custom function
510 513 def meaning_of_life(question=None):
511 514 if question:
512 515 return 42
513 516 return "Don't panic"
514 517
515 518 assert "meaning_of_life(question=None)" in pretty.pretty(meaning_of_life)
516 519
517 520
518 521 class OrderedCounter(Counter, OrderedDict):
519 522 'Counter that remembers the order elements are first encountered'
520 523
521 524 def __repr__(self):
522 525 return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))
523 526
524 527 def __reduce__(self):
525 528 return self.__class__, (OrderedDict(self),)
526 529
527 530 class MySet(set): # Override repr of a basic type
528 531 def __repr__(self):
529 532 return 'mine'
530 533
531 534 def test_custom_repr():
532 535 """A custom repr should override a pretty printer for a parent type"""
533 536 oc = OrderedCounter("abracadabra")
534 537 assert "OrderedCounter(OrderedDict" in pretty.pretty(oc)
535 538
536 539 assert pretty.pretty(MySet()) == "mine"
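# A sketch of the parametrized form that the "TODO : pytest.mark.parametrize
# once nose is gone" comments above point at (illustration only, not part of
# the change set; it assumes ``pytest`` is importable in this module and keeps
# the same case table):
#
#     @pytest.mark.parametrize(
#         "obj, expected",
#         [
#             (Counter(), "Counter()"),
#             (Counter(a=1), "Counter({'a': 1})"),
#         ],
#     )
#     def test_collections_counter(obj, expected):
#         assert pretty.pretty(obj) == expected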
@@ -1,544 +1,551 b''
1 1 """
2 2 Module to define and register Terminal IPython shortcuts with
3 3 :mod:`prompt_toolkit`
4 4 """
5 5
6 6 # Copyright (c) IPython Development Team.
7 7 # Distributed under the terms of the Modified BSD License.
8 8
9 9 import warnings
10 10 import signal
11 11 import sys
12 12 import re
13 13 import os
14 14 from typing import Callable
15 15
16 16
17 17 from prompt_toolkit.application.current import get_app
18 18 from prompt_toolkit.enums import DEFAULT_BUFFER, SEARCH_BUFFER
19 19 from prompt_toolkit.filters import (has_focus, has_selection, Condition,
20 20 vi_insert_mode, emacs_insert_mode, has_completions, vi_mode)
21 21 from prompt_toolkit.key_binding.bindings.completion import display_completions_like_readline
22 22 from prompt_toolkit.key_binding import KeyBindings
23 23 from prompt_toolkit.key_binding.bindings import named_commands as nc
24 24 from prompt_toolkit.key_binding.vi_state import InputMode, ViState
25 25
26 26 from IPython.utils.decorators import undoc
27 27
28 28 @undoc
29 29 @Condition
30 30 def cursor_in_leading_ws():
31 31 before = get_app().current_buffer.document.current_line_before_cursor
32 32 return (not before) or before.isspace()
33 33
34 34
35 # Needed to accept autosuggestions in vi insert mode
36 def _apply_autosuggest(event):
37 """
38 Apply autosuggestion if at end of line.
39 """
40 b = event.current_buffer
41 d = b.document
42 after_cursor = d.text[d.cursor_position :]
43 lines = after_cursor.split("\n")
44 end_of_current_line = lines[0].strip()
45 suggestion = b.suggestion
46 if (suggestion is not None) and (suggestion.text) and (end_of_current_line == ""):
47 b.insert_text(suggestion.text)
48 else:
49 nc.end_of_line(event)
50
35 51 def create_ipython_shortcuts(shell):
36 52 """Set up the prompt_toolkit keyboard shortcuts for IPython"""
37 53
38 54 kb = KeyBindings()
39 55 insert_mode = vi_insert_mode | emacs_insert_mode
40 56
41 57 if getattr(shell, 'handle_return', None):
42 58 return_handler = shell.handle_return(shell)
43 59 else:
44 60 return_handler = newline_or_execute_outer(shell)
45 61
46 62 kb.add('enter', filter=(has_focus(DEFAULT_BUFFER)
47 63 & ~has_selection
48 64 & insert_mode
49 65 ))(return_handler)
50 66
51 67 def reformat_and_execute(event):
52 68 reformat_text_before_cursor(event.current_buffer, event.current_buffer.document, shell)
53 69 event.current_buffer.validate_and_handle()
54 70
55 71 kb.add('escape', 'enter', filter=(has_focus(DEFAULT_BUFFER)
56 72 & ~has_selection
57 73 & insert_mode
58 74 ))(reformat_and_execute)
59 75
60 76 kb.add("c-\\")(quit)
61 77
62 78 kb.add('c-p', filter=(vi_insert_mode & has_focus(DEFAULT_BUFFER))
63 79 )(previous_history_or_previous_completion)
64 80
65 81 kb.add('c-n', filter=(vi_insert_mode & has_focus(DEFAULT_BUFFER))
66 82 )(next_history_or_next_completion)
67 83
68 84 kb.add('c-g', filter=(has_focus(DEFAULT_BUFFER) & has_completions)
69 85 )(dismiss_completion)
70 86
71 87 kb.add('c-c', filter=has_focus(DEFAULT_BUFFER))(reset_buffer)
72 88
73 89 kb.add('c-c', filter=has_focus(SEARCH_BUFFER))(reset_search_buffer)
74 90
75 91 supports_suspend = Condition(lambda: hasattr(signal, 'SIGTSTP'))
76 92 kb.add('c-z', filter=supports_suspend)(suspend_to_bg)
77 93
78 94 # Ctrl+I == Tab
79 95 kb.add('tab', filter=(has_focus(DEFAULT_BUFFER)
80 96 & ~has_selection
81 97 & insert_mode
82 98 & cursor_in_leading_ws
83 99 ))(indent_buffer)
84 100 kb.add('c-o', filter=(has_focus(DEFAULT_BUFFER) & emacs_insert_mode)
85 101 )(newline_autoindent_outer(shell.input_transformer_manager))
86 102
87 103 kb.add('f2', filter=has_focus(DEFAULT_BUFFER))(open_input_in_editor)
88 104
89 105 @Condition
90 106 def auto_match():
91 107 return shell.auto_match
92 108
93 109 focused_insert = (vi_insert_mode | emacs_insert_mode) & has_focus(DEFAULT_BUFFER)
94 110 _preceding_text_cache = {}
95 111 _following_text_cache = {}
96 112
97 113 def preceding_text(pattern):
98 114 try:
99 115 return _preceding_text_cache[pattern]
100 116 except KeyError:
101 117 pass
102 118 m = re.compile(pattern)
103 119
104 120 def _preceding_text():
105 121 app = get_app()
106 122 return bool(m.match(app.current_buffer.document.current_line_before_cursor))
107 123
108 124 condition = Condition(_preceding_text)
109 125 _preceding_text_cache[pattern] = condition
110 126 return condition
111 127
112 128 def following_text(pattern):
113 129 try:
114 130 return _following_text_cache[pattern]
115 131 except KeyError:
116 132 pass
117 133 m = re.compile(pattern)
118 134
119 135 def _following_text():
120 136 app = get_app()
121 137 return bool(m.match(app.current_buffer.document.current_line_after_cursor))
122 138
123 139 condition = Condition(_following_text)
124 140 _following_text_cache[pattern] = condition
125 141 return condition
126 142
127 143 # auto match
128 144 @kb.add("(", filter=focused_insert & auto_match & following_text(r"[,)}\]]|$"))
129 145 def _(event):
130 146 event.current_buffer.insert_text("()")
131 147 event.current_buffer.cursor_left()
132 148
133 149 @kb.add("[", filter=focused_insert & auto_match & following_text(r"[,)}\]]|$"))
134 150 def _(event):
135 151 event.current_buffer.insert_text("[]")
136 152 event.current_buffer.cursor_left()
137 153
138 154 @kb.add("{", filter=focused_insert & auto_match & following_text(r"[,)}\]]|$"))
139 155 def _(event):
140 156 event.current_buffer.insert_text("{}")
141 157 event.current_buffer.cursor_left()
142 158
143 159 @kb.add(
144 160 '"',
145 161 filter=focused_insert
146 162 & auto_match
147 163 & preceding_text(r'^([^"]+|"[^"]*")*$')
148 164 & following_text(r"[,)}\]]|$"),
149 165 )
150 166 def _(event):
151 167 event.current_buffer.insert_text('""')
152 168 event.current_buffer.cursor_left()
153 169
154 170 @kb.add(
155 171 "'",
156 172 filter=focused_insert
157 173 & auto_match
158 174 & preceding_text(r"^([^']+|'[^']*')*$")
159 175 & following_text(r"[,)}\]]|$"),
160 176 )
161 177 def _(event):
162 178 event.current_buffer.insert_text("''")
163 179 event.current_buffer.cursor_left()
164 180
165 181 # raw string
166 182 @kb.add(
167 183 "(", filter=focused_insert & auto_match & preceding_text(r".*(r|R)[\"'](-*)$")
168 184 )
169 185 def _(event):
170 186 matches = re.match(
171 187 r".*(r|R)[\"'](-*)",
172 188 event.current_buffer.document.current_line_before_cursor,
173 189 )
174 190 dashes = matches.group(2) or ""
175 191 event.current_buffer.insert_text("()" + dashes)
176 192 event.current_buffer.cursor_left(len(dashes) + 1)
177 193
178 194 @kb.add(
179 195 "[", filter=focused_insert & auto_match & preceding_text(r".*(r|R)[\"'](-*)$")
180 196 )
181 197 def _(event):
182 198 matches = re.match(
183 199 r".*(r|R)[\"'](-*)",
184 200 event.current_buffer.document.current_line_before_cursor,
185 201 )
186 202 dashes = matches.group(2) or ""
187 203 event.current_buffer.insert_text("[]" + dashes)
188 204 event.current_buffer.cursor_left(len(dashes) + 1)
189 205
190 206 @kb.add(
191 207 "{", filter=focused_insert & auto_match & preceding_text(r".*(r|R)[\"'](-*)$")
192 208 )
193 209 def _(event):
194 210 matches = re.match(
195 211 r".*(r|R)[\"'](-*)",
196 212 event.current_buffer.document.current_line_before_cursor,
197 213 )
198 214 dashes = matches.group(2) or ""
199 215 event.current_buffer.insert_text("{}" + dashes)
200 216 event.current_buffer.cursor_left(len(dashes) + 1)
201 217
202 218 # just move cursor
203 219 @kb.add(")", filter=focused_insert & auto_match & following_text(r"^\)"))
204 220 @kb.add("]", filter=focused_insert & auto_match & following_text(r"^\]"))
205 221 @kb.add("}", filter=focused_insert & auto_match & following_text(r"^\}"))
206 222 @kb.add('"', filter=focused_insert & auto_match & following_text('^"'))
207 223 @kb.add("'", filter=focused_insert & auto_match & following_text("^'"))
208 224 def _(event):
209 225 event.current_buffer.cursor_right()
210 226
211 227 @kb.add(
212 228 "backspace",
213 229 filter=focused_insert
214 230 & preceding_text(r".*\($")
215 231 & auto_match
216 232 & following_text(r"^\)"),
217 233 )
218 234 @kb.add(
219 235 "backspace",
220 236 filter=focused_insert
221 237 & preceding_text(r".*\[$")
222 238 & auto_match
223 239 & following_text(r"^\]"),
224 240 )
225 241 @kb.add(
226 242 "backspace",
227 243 filter=focused_insert
228 244 & preceding_text(r".*\{$")
229 245 & auto_match
230 246 & following_text(r"^\}"),
231 247 )
232 248 @kb.add(
233 249 "backspace",
234 250 filter=focused_insert
235 251 & preceding_text('.*"$')
236 252 & auto_match
237 253 & following_text('^"'),
238 254 )
239 255 @kb.add(
240 256 "backspace",
241 257 filter=focused_insert
242 258 & preceding_text(r".*'$")
243 259 & auto_match
244 260 & following_text(r"^'"),
245 261 )
246 262 def _(event):
247 263 event.current_buffer.delete()
248 264 event.current_buffer.delete_before_cursor()
249 265
250 266 if shell.display_completions == "readlinelike":
251 267 kb.add(
252 268 "c-i",
253 269 filter=(
254 270 has_focus(DEFAULT_BUFFER)
255 271 & ~has_selection
256 272 & insert_mode
257 273 & ~cursor_in_leading_ws
258 274 ),
259 275 )(display_completions_like_readline)
260 276
261 277 if sys.platform == "win32":
262 278 kb.add("c-v", filter=(has_focus(DEFAULT_BUFFER) & ~vi_mode))(win_paste)
263 279
264 280 @Condition
265 281 def ebivim():
266 282 return shell.emacs_bindings_in_vi_insert_mode
267 283
268 284 focused_insert_vi = has_focus(DEFAULT_BUFFER) & vi_insert_mode
269 285
270 # Needed for to accept autosuggestions in vi insert mode
271 def _apply_autosuggest(event):
272 b = event.current_buffer
273 suggestion = b.suggestion
274 if suggestion is not None and suggestion.text:
275 b.insert_text(suggestion.text)
276 else:
277 nc.end_of_line(event)
278
279 286 @kb.add("end", filter=has_focus(DEFAULT_BUFFER) & (ebivim | ~vi_insert_mode))
280 287 def _(event):
281 288 _apply_autosuggest(event)
282 289
283 290 @kb.add("c-e", filter=focused_insert_vi & ebivim)
284 291 def _(event):
285 292 _apply_autosuggest(event)
286 293
287 294 @kb.add("c-f", filter=focused_insert_vi)
288 295 def _(event):
289 296 b = event.current_buffer
290 297 suggestion = b.suggestion
291 298 if suggestion:
292 299 b.insert_text(suggestion.text)
293 300 else:
294 301 nc.forward_char(event)
295 302
296 303 @kb.add("escape", "f", filter=focused_insert_vi & ebivim)
297 304 def _(event):
298 305 b = event.current_buffer
299 306 suggestion = b.suggestion
300 307 if suggestion:
301 308 t = re.split(r"(\S+\s+)", suggestion.text)
302 309 b.insert_text(next((x for x in t if x), ""))
303 310 else:
304 311 nc.forward_word(event)
305 312
306 313 # Simple Control keybindings
307 314 key_cmd_dict = {
308 315 "c-a": nc.beginning_of_line,
309 316 "c-b": nc.backward_char,
310 317 "c-k": nc.kill_line,
311 318 "c-w": nc.backward_kill_word,
312 319 "c-y": nc.yank,
313 320 "c-_": nc.undo,
314 321 }
315 322
316 323 for key, cmd in key_cmd_dict.items():
317 324 kb.add(key, filter=focused_insert_vi & ebivim)(cmd)
318 325
319 326 # Alt and Combo Control keybindings
320 327 keys_cmd_dict = {
321 328 # Control Combos
322 329 ("c-x", "c-e"): nc.edit_and_execute,
323 330 ("c-x", "e"): nc.edit_and_execute,
324 331 # Alt
325 332 ("escape", "b"): nc.backward_word,
326 333 ("escape", "c"): nc.capitalize_word,
327 334 ("escape", "d"): nc.kill_word,
328 335 ("escape", "h"): nc.backward_kill_word,
329 336 ("escape", "l"): nc.downcase_word,
330 337 ("escape", "u"): nc.uppercase_word,
331 338 ("escape", "y"): nc.yank_pop,
332 339 ("escape", "."): nc.yank_last_arg,
333 340 }
334 341
335 342 for keys, cmd in keys_cmd_dict.items():
336 343 kb.add(*keys, filter=focused_insert_vi & ebivim)(cmd)
337 344
338 345 def get_input_mode(self):
339 346 app = get_app()
340 347 app.ttimeoutlen = shell.ttimeoutlen
341 348 app.timeoutlen = shell.timeoutlen
342 349
343 350 return self._input_mode
344 351
345 352 def set_input_mode(self, mode):
346 353 shape = {InputMode.NAVIGATION: 2, InputMode.REPLACE: 4}.get(mode, 6)
347 354 cursor = "\x1b[{} q".format(shape)
348 355
349 356 sys.stdout.write(cursor)
350 357 sys.stdout.flush()
351 358
352 359 self._input_mode = mode
353 360
354 361 if shell.editing_mode == "vi" and shell.modal_cursor:
355 362 ViState._input_mode = InputMode.INSERT
356 363 ViState.input_mode = property(get_input_mode, set_input_mode)
357 364
358 365 return kb
359 366
360 367
361 368 def reformat_text_before_cursor(buffer, document, shell):
362 369 text = buffer.delete_before_cursor(len(document.text[:document.cursor_position]))
363 370 try:
364 371 formatted_text = shell.reformat_handler(text)
365 372 buffer.insert_text(formatted_text)
366 373 except Exception as e:
367 374 buffer.insert_text(text)
368 375
369 376
370 377 def newline_or_execute_outer(shell):
371 378
372 379 def newline_or_execute(event):
373 380 """When the user presses return, insert a newline or execute the code."""
374 381 b = event.current_buffer
375 382 d = b.document
376 383
377 384 if b.complete_state:
378 385 cc = b.complete_state.current_completion
379 386 if cc:
380 387 b.apply_completion(cc)
381 388 else:
382 389 b.cancel_completion()
383 390 return
384 391
385 392 # If there's only one line, treat it as if the cursor is at the end.
386 393 # See https://github.com/ipython/ipython/issues/10425
387 394 if d.line_count == 1:
388 395 check_text = d.text
389 396 else:
390 397 check_text = d.text[:d.cursor_position]
391 398 status, indent = shell.check_complete(check_text)
392 399
393 400 # if all we have after the cursor is whitespace: reformat current text
394 401 # before cursor
395 402 after_cursor = d.text[d.cursor_position:]
396 403 reformatted = False
397 404 if not after_cursor.strip():
398 405 reformat_text_before_cursor(b, d, shell)
399 406 reformatted = True
400 407 if not (d.on_last_line or
401 408 d.cursor_position_row >= d.line_count - d.empty_line_count_at_the_end()
402 409 ):
403 410 if shell.autoindent:
404 411 b.insert_text('\n' + indent)
405 412 else:
406 413 b.insert_text('\n')
407 414 return
408 415
409 416 if (status != 'incomplete') and b.accept_handler:
410 417 if not reformatted:
411 418 reformat_text_before_cursor(b, d, shell)
412 419 b.validate_and_handle()
413 420 else:
414 421 if shell.autoindent:
415 422 b.insert_text('\n' + indent)
416 423 else:
417 424 b.insert_text('\n')
418 425 return newline_or_execute
419 426
420 427
421 428 def previous_history_or_previous_completion(event):
422 429 """
423 430 Control-P in vi edit mode on readline is history next, unlike default prompt toolkit.
424 431
425 432 If the completer is open this still selects the previous completion.
426 433 """
427 434 event.current_buffer.auto_up()
428 435
429 436
430 437 def next_history_or_next_completion(event):
431 438 """
432 439 Control-N in vi edit mode on readline is history previous, unlike default prompt toolkit.
433 440
434 441 If the completer is open this still selects the next completion.
435 442 """
436 443 event.current_buffer.auto_down()
437 444
438 445
439 446 def dismiss_completion(event):
440 447 b = event.current_buffer
441 448 if b.complete_state:
442 449 b.cancel_completion()
443 450
444 451
445 452 def reset_buffer(event):
446 453 b = event.current_buffer
447 454 if b.complete_state:
448 455 b.cancel_completion()
449 456 else:
450 457 b.reset()
451 458
452 459
453 460 def reset_search_buffer(event):
454 461 if event.current_buffer.document.text:
455 462 event.current_buffer.reset()
456 463 else:
457 464 event.app.layout.focus(DEFAULT_BUFFER)
458 465
459 466 def suspend_to_bg(event):
460 467 event.app.suspend_to_background()
461 468
462 469 def quit(event):
463 470 """
464 471 On platforms that support SIGQUIT, send SIGQUIT to the current process.
465 472 On other platforms, just exit the process with a message.
466 473 """
467 474 sigquit = getattr(signal, "SIGQUIT", None)
468 475 if sigquit is not None:
469 476 os.kill(0, signal.SIGQUIT)
470 477 else:
471 478 sys.exit("Quit")
472 479
473 480 def indent_buffer(event):
474 481 event.current_buffer.insert_text(' ' * 4)
475 482
476 483 @undoc
477 484 def newline_with_copy_margin(event):
478 485 """
479 486 DEPRECATED since IPython 6.0
480 487
481 488 See :any:`newline_autoindent_outer` for a replacement.
482 489
483 490 Preserve margin and cursor position when using
484 491 Control-O to insert a newline in EMACS mode
485 492 """
486 493 warnings.warn("`newline_with_copy_margin(event)` is deprecated since IPython 6.0. "
487 494 "see `newline_autoindent_outer(shell)(event)` for a replacement.",
488 495 DeprecationWarning, stacklevel=2)
489 496
490 497 b = event.current_buffer
491 498 cursor_start_pos = b.document.cursor_position_col
492 499 b.newline(copy_margin=True)
493 500 b.cursor_up(count=1)
494 501 cursor_end_pos = b.document.cursor_position_col
495 502 if cursor_start_pos != cursor_end_pos:
496 503 pos_diff = cursor_start_pos - cursor_end_pos
497 504 b.cursor_right(count=pos_diff)
498 505
499 506 def newline_autoindent_outer(inputsplitter) -> Callable[..., None]:
500 507 """
501 508 Return a function suitable for inserting an indented newline after the cursor.
502 509
503 510 Fancier version of the deprecated ``newline_with_copy_margin`` which should
504 511 compute the correct indentation of the inserted line. That is to say, indent
505 512 by 4 extra spaces after a function definition, class definition, context
506 513 manager... And dedent by 4 spaces after ``pass``, ``return``, ``raise ...``.
507 514 """
508 515
509 516 def newline_autoindent(event):
510 517 """insert a newline after the cursor indented appropriately."""
511 518 b = event.current_buffer
512 519 d = b.document
513 520
514 521 if b.complete_state:
515 522 b.cancel_completion()
516 523 text = d.text[:d.cursor_position] + '\n'
517 524 _, indent = inputsplitter.check_complete(text)
518 525 b.insert_text('\n' + (' ' * (indent or 0)), move_cursor=False)
519 526
520 527 return newline_autoindent
521 528
522 529
523 530 def open_input_in_editor(event):
524 531 event.app.current_buffer.open_in_editor()
525 532
526 533
527 534 if sys.platform == 'win32':
528 535 from IPython.core.error import TryNext
529 536 from IPython.lib.clipboard import (ClipboardEmpty,
530 537 win32_clipboard_get,
531 538 tkinter_clipboard_get)
532 539
533 540 @undoc
534 541 def win_paste(event):
535 542 try:
536 543 text = win32_clipboard_get()
537 544 except TryNext:
538 545 try:
539 546 text = tkinter_clipboard_get()
540 547 except (TryNext, ClipboardEmpty):
541 548 return
542 549 except ClipboardEmpty:
543 550 return
544 551 event.current_buffer.insert_text(text.replace("\t", " " * 4))
@@ -1,171 +1,171 b''
1 .. image:: https://codecov.io/github/ipython/ipython/coverage.svg?branch=master
2 :target: https://codecov.io/github/ipython/ipython?branch=master
1 .. image:: https://codecov.io/github/ipython/ipython/coverage.svg?branch=main
2 :target: https://codecov.io/github/ipython/ipython?branch=main
3 3
4 4 .. image:: https://img.shields.io/pypi/v/IPython.svg
5 5 :target: https://pypi.python.org/pypi/ipython
6 6
7 7 .. image:: https://github.com/ipython/ipython/actions/workflows/test.yml/badge.svg
8 8 :target: https://github.com/ipython/ipython/actions/workflows/test.yml
9 9
10 10 .. image:: https://www.codetriage.com/ipython/ipython/badges/users.svg
11 11 :target: https://www.codetriage.com/ipython/ipython/
12 12
13 13 .. image:: https://raster.shields.io/badge/Follows-NEP29-brightgreen.png
14 14 :target: https://numpy.org/neps/nep-0029-deprecation_policy.html
15 15
16 16
17 17 ===========================================
18 18 IPython: Productive Interactive Computing
19 19 ===========================================
20 20
21 21 Overview
22 22 ========
23 23
24 24 Welcome to IPython. Our full documentation is available on `ipython.readthedocs.io
25 25 <https://ipython.readthedocs.io/en/stable/>`_ and contains information on how to install, use, and
26 26 contribute to the project.
27 27 IPython (Interactive Python) is a command shell for interactive computing in multiple programming languages, originally developed for the Python programming language, that offers introspection, rich media, shell syntax, tab completion, and history.
28 28
29 29 **IPython versions and Python Support**
30 30
31 31 Starting with IPython 7.10, IPython follows `NEP 29 <https://numpy.org/neps/nep-0029-deprecation_policy.html>`_
32 32
33 33 **IPython 7.17+** requires Python version 3.7 and above.
34 34
35 35 **IPython 7.10+** requires Python version 3.6 and above.
36 36
37 37 **IPython 7.0** requires Python version 3.5 and above.
38 38
39 39 **IPython 6.x** requires Python version 3.3 and above.
40 40
41 41 **IPython 5.x LTS** is the compatible release for Python 2.7.
42 42 If you require Python 2 support, you **must** use IPython 5.x LTS. Please
43 43 update your project configurations and requirements as necessary.
44 44
45 45
46 46 The Notebook, Qt console and a number of other pieces are now part of *Jupyter*.
47 47 See the `Jupyter installation docs <https://jupyter.readthedocs.io/en/latest/install.html>`__
48 48 if you want to use these.
49 49
50 50 Main features of IPython
51 51 ========================
52 52 Comprehensive object introspection.
53 53
54 54 Input history, persistent across sessions.
55 55
56 56 Caching of output results during a session with automatically generated references.
57 57
58 58 Extensible tab completion, with support by default for completion of Python variables and keywords, filenames and function keywords.
59 59
60 60 Extensible system of β€˜magic’ commands for controlling the environment and performing many tasks related to IPython or the operating system.
61 61
62 62 A rich configuration system with easy switching between different setups (simpler than changing $PYTHONSTARTUP environment variables every time).
63 63
64 64 Session logging and reloading.
65 65
66 66 Extensible syntax processing for special purpose situations.
67 67
68 68 Access to the system shell with user-extensible alias system.
69 69
70 70 Easily embeddable in other Python programs and GUIs.
71 71
72 72 Integrated access to the pdb debugger and the Python profiler.
73 73
74 74
75 75 Development and Instant running
76 76 ===============================
77 77
78 78 You can find the latest version of the development documentation on `readthedocs
79 79 <https://ipython.readthedocs.io/en/latest/>`_.
80 80
81 81 You can run IPython from this directory without even installing it system-wide
82 82 by typing at the terminal::
83 83
84 84 $ python -m IPython
85 85
86 86 Or see the `development installation docs
87 87 <https://ipython.readthedocs.io/en/latest/install/install.html#installing-the-development-version>`_
88 88 for the latest revision on Read the Docs.
89 89
90 90 Documentation and installation instructions for older versions of IPython can be
91 91 found on the `IPython website <https://ipython.org/documentation.html>`_.
92 92
93 93
94 94
95 95 IPython requires Python version 3 or above
96 96 ==========================================
97 97
98 98 Starting with version 6.0, IPython does not support Python 2.7, 3.0, 3.1, or
99 99 3.2.
100 100
101 101 For a version compatible with Python 2.7, please install the 5.x LTS Long Term
102 102 Support version.
103 103
104 104 If you are encountering this error message, you are likely trying to install or
105 105 use IPython from source. You need to check out the remote 5.x branch. If you are
106 106 using git, the following should work::
107 107
108 108 $ git fetch origin
109 109 $ git checkout 5.x
110 110
111 111 If you encounter this error message with a regular install of IPython, then you
112 112 likely need to update your package manager; for example, if you are using `pip`,
113 113 check the version of pip with::
114 114
115 115 $ pip --version
116 116
117 117 You will need to update pip to version 9.0.1 or greater. If you are not using
118 118 pip, please inquire with the maintainers of the package for your package
119 119 manager.
120 120
121 121 For more information see one of our blog posts:
122 122
123 123 https://blog.jupyter.org/release-of-ipython-5-0-8ce60b8d2e8e
124 124
125 125 As well as the following Pull-Request for discussion:
126 126
127 127 https://github.com/ipython/ipython/pull/9900
128 128
129 129 This error also occurs if you are invoking ``setup.py`` directly – which you
130 130 should not – or are using ``easy_install``. If this is the case, use ``pip
131 131 install .`` instead of ``setup.py install``, and ``pip install -e .`` instead
132 132 of ``setup.py develop``. If your project depends on IPython, you may
133 133 also want to make that dependency conditional on the Python
134 134 version::
135 135
136 136 install_req = ['ipython']
137 137 if sys.version_info[0] < 3 and 'bdist_wheel' not in sys.argv:
138 138 install_req.remove('ipython')
139 139 install_req.append('ipython<6')
140 140
141 141 setup(
142 142 ...
143 143 install_requires=install_req
144 144 )
145 145
146 146 Alternatives to IPython
147 147 =======================
148 148
149 149 IPython may not be to your taste; if that's the case, there are similar
150 150 projects that you might want to use:
151 151
152 152 - The classic Python REPL.
153 153 - `bpython <https://bpython-interpreter.org/>`_
154 154 - `mypython <https://www.asmeurer.com/mypython/>`_
155 155 - `ptpython and ptipython <https://pypi.org/project/ptpython/>`_
156 156 - `Xonsh <https://xon.sh/>`_
157 157
158 158 Ignoring commits with git blame.ignoreRevsFile
159 159 ==============================================
160 160
161 161 As of git 2.23, it is possible to make formatting changes without breaking
162 162 ``git blame``. See the `git documentation
163 163 <https://git-scm.com/docs/git-config#Documentation/git-config.txt-blameignoreRevsFile>`_
164 164 for more details.
165 165
166 166 To use this feature you must:
167 167
168 168 - Install git >= 2.23
169 169 - Configure your local git repo by running:
170 170 - POSIX: ``tools/configure-git-blame-ignore-revs.sh``
171 171 - Windows: ``tools\configure-git-blame-ignore-revs.bat``
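
For reference, the helper scripts listed above are expected to apply a
configuration roughly like the following sketch (the name of the revisions
file is an assumption; check the scripts for the actual path)::

    git config blame.ignoreRevsFile .git-blame-ignore-revs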
@@ -1,320 +1,320 b''
1 1 .. _core_developer_guide:
2 2
3 3 =================================
4 4 Guide for IPython core Developers
5 5 =================================
6 6
7 7 This guide documents the development of IPython itself. Alternatively,
8 8 developers of third party tools and libraries that use IPython should see the
9 9 :doc:`../development/index`.
10 10
11 11
12 12 For instructions on how to make a developer install see :ref:`devinstall`.
13 13
14 14 Backporting Pull requests
15 15 =========================
16 16
17 All pull requests should usually be made against ``master``, if a Pull Request
17 All pull requests should usually be made against ``main``; if a Pull Request
18 18 needs to be backported to an earlier release, it should be tagged with the
19 19 correct ``milestone``.
20 20
21 21 If you tag a pull request with a milestone **before** merging the pull request,
22 and the base ref is ``master``, then our backport bot should automatically create
22 and the base ref is ``main``, then our backport bot should automatically create
23 23 a corresponding pull request that backports to the correct branch.
24 24
25 25 If you have write access to the IPython repository you can also just mention the
26 26 **backport bot** to do the work for you. The bot is evolving so instructions may
27 27 be different. At the time of this writing you can use::
28 28
29 29 @meeseeksdev[bot] backport [to] <branchname>
30 30
31 31 The bot will attempt to backport the current pull-request and issue a PR if
32 32 possible.
33 33
34 34 .. note::
35 35
36 36 The ``@`` and ``[bot]`` when mentioning the bot should be optional and can
37 37 be omitted.
38 38
39 39 If the pull request cannot be automatically backported, the bot should tell you
40 40 so on the PR and apply a "Need manual backport" tag to the original PR.
41 41
42 42 .. _release_process:
43 43
44 44 IPython release process
45 45 =======================
46 46
47 47 This document contains the process that is used to create an IPython release.
48 48
49 49 Conveniently, the ``release`` script in the ``tools`` directory of the ``IPython``
50 50 repository automates most of the release process. This document serves as a
51 51 handy reminder and checklist for the release manager.
52 52
53 53 During the release process, you might need the extra following dependencies:
54 54
55 55 - ``keyring`` to access your GitHub authentication tokens
56 56 - ``graphviz`` to generate some graphs in the documentation
57 57 - ``ghpro`` to generate the stats
58 58
59 59 Make sure you have all the required dependencies to run the tests as well.
60 60
61 61 You can try to ``source tools/release_helper.sh`` when releasing via bash; it
62 62 should guide you through most of the process.
63 63
64 64
65 65 1. Set Environment variables
66 66 ----------------------------
67 67
68 68 Set environment variables to document previous release tag, current
69 69 release milestone, current release version, and git tag.
70 70
71 71 These variables may be used later to copy/paste as answers to the script
72 72 questions instead of typing the appropriate command when the time comes. These
73 73 variables are not used by the scripts directly; therefore, there is no need to
74 74 ``export`` them. The format for bash is as follows, but note that these values
75 75 are just an example valid only for the 5.0 release; you'll need to update them
76 76 for the release you are actually making::
77 77
78 78 PREV_RELEASE=4.2.1
79 79 MILESTONE=5.0
80 80 VERSION=5.0.0
81 BRANCH=master
81 BRANCH=main
82 82
83 83 For `reproducibility of builds <https://reproducible-builds.org/specs/source-date-epoch/>`_,
84 84 we recommend setting ``SOURCE_DATE_EPOCH`` prior to running the build; record the used value
85 85 of ``SOURCE_DATE_EPOCH``, as it may not be available from the build artifacts. You
86 86 should be able to use ``date +%s`` to get a formatted timestamp::
87 87
88 88 SOURCE_DATE_EPOCH=$(date +%s)
89 89
90 90
91 91 2. Create GitHub stats and finish release note
92 92 ----------------------------------------------
93 93
94 94 .. note::
95 95
96 96 This step is optional if making a Beta or RC release.
97 97
98 98 .. note::
99 99
100 100 Before generating the GitHub stats, verify that all closed issues and pull
101 101 requests have `appropriate milestones
102 102 <https://github.com/ipython/ipython/wiki/Dev:-GitHub-workflow#milestones>`_.
103 103 `This search
104 104 <https://github.com/ipython/ipython/issues?q=is%3Aclosed+no%3Amilestone+is%3Aissue>`_
105 105 should return no results before creating the GitHub stats.
106 106
107 107 If a major release:
108 108
109 109 - merge any pull request notes into what's new::
110 110
111 111 python tools/update_whatsnew.py
112 112
113 113 - update ``docs/source/whatsnew/development.rst``, to ensure it covers
114 114 the major release features
115 115
116 116 - move the contents of ``development.rst`` to ``versionX.rst`` where ``X`` is
117 117 the numerical release version
118 118
119 119 - generate summary of GitHub contributions, which can be done with::
120 120
121 121 python tools/github_stats.py --milestone $MILESTONE > stats.rst
122 122
123 123 which may need some manual cleanup of ``stats.rst``. Add the cleaned
124 124 ``stats.rst`` results to ``docs/source/whatsnew/github-stats-X.rst``
125 125 where ``X`` is the numerical release version (don't forget to add it to
126 126 the git repository as well). If creating a major release, make a new
127 127 ``github-stats-X.rst`` file; if creating a minor release, the content
128 128 from ``stats.rst`` may simply be added to the top of an existing
129 129 ``github-stats-X.rst`` file.
130 130
131 131 - Edit ``docs/source/whatsnew/index.rst`` to list the new ``github-stats-X``
132 132 file you just created.
133 133
134 134 - You do not need to temporarily remove the first entry called
135 135 ``development``, nor re-add it after the release; it will automatically be
136 136 hidden when releasing a stable version of IPython (if ``_version_extra``
137 137 in ``release.py`` is an empty string).
138 138
139 139 Make sure that the stats file has a header or it won't be rendered in
140 140 the final documentation.
141 141
142 142 To find duplicates and update `.mailmap`, use::
143 143
144 144 git log --format="%aN <%aE>" $PREV_RELEASE... | sort -u -f
145 145
146 146 If making a minor release, you might need to do some of the above steps manually
147 147 and forward-port the changes.
148 148
149 149 3. Make sure the repository is clean
150 150 ------------------------------------
151 151
152 152 The repository should not contain any files that could be problematic.
153 153 Remove all non-tracked files with:
154 154
155 155 .. code::
156 156
157 157 git clean -xfdi
158 158
159 159 This will ask for confirmation before removing all untracked files. Make
160 160 sure the ``dist/`` folder is clean to avoid any stale builds from
161 161 previous build attempts.
162 162
163 163
164 164 4. Update the release version number
165 165 ------------------------------------
166 166
167 167 Edit ``IPython/core/release.py`` to have the current version.
168 168
169 169 In particular, update the version number and the ``_version_extra`` content in
170 170 ``IPython/core/release.py``.
171 171
172 172 Step 5 will validate your changes automatically, but you might still want to
173 173 make sure the version number matches PEP 440.
174 174
175 175 In particular, ``rc`` and ``beta`` are not separated by ``.`` or the ``sdist``
176 176 and ``bdist`` will appear as different releases. For example, a valid version
177 177 number for a release candidate (rc) release is: ``1.3rc1``. Notice that there
178 178 is no separator between the '3' and the 'r'. Check the environment variable
179 179 ``$VERSION`` as well.
180 180
181 181 You will likely just have to modify/comment/uncomment one of the lines setting
182 182 ``_version_extra``.
183 183
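As a rough sketch only (the exact lines and values in ``release.py`` may
differ), the edit usually amounts to switching which assignment is active::

    # _version_extra = ".dev"   # for development builds
    # _version_extra = "rc1"    # for a release candidate
    _version_extra = ""         # for a stable release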
184 184
185 185 5. Run the `tools/build_release` script
186 186 ---------------------------------------
187 187
188 188 Running ``tools/build_release`` does all the file checking and building that
189 189 the real release script will do. This makes test installations, checks that
190 190 the build procedure runs OK, and tests other steps in the release process.
191 191
192 192 The ``build_release`` script will in particular verify that the version number
193 193 matches PEP 440, in order to avoid surprises at the time of build upload.
194 194
195 195 We encourage creating a test build of the docs as well.
196 196
197 197 6. Create and push the new tag
198 198 ------------------------------
199 199
200 200 Commit the changes to release.py::
201 201
202 202 git commit -am "release $VERSION" -S
203 203 git push origin $BRANCH
204 204
205 205 (omit the ``-S`` if you are not signing the package)
206 206
207 207 Create and push the tag::
208 208
209 209 git tag -am "release $VERSION" "$VERSION" -s
210 210 git push origin $VERSION
211 211
212 212 (omit the ``-s`` if you are not signing the package)
213 213
214 214 Update release.py back to ``x.y-dev`` or ``x.y-maint`` commit and push::
215 215
216 216 git commit -am "back to development" -S
217 217 git push origin $BRANCH
218 218
219 219 (omit the ``-S`` if you are not signing the package)
220 220
221 221 Now checkout the tag we just made::
222 222
223 223 git checkout $VERSION
224 224
225 225 7. Run the release script
226 226 -------------------------
227 227
228 228 Run the ``release`` script; this step requires having a current wheel, Python
229 229 >=3.4 and Python 2.7::
230 230
231 231 ./tools/release
232 232
233 233 This makes the tarballs and wheels, and puts them under the ``dist/``
234 234 folder. Be sure to test the ``wheels`` and the ``sdist`` locally before
235 235 uploading them to PyPI. We do not use a universal wheel, as each wheel
236 236 installs an ``ipython2`` or ``ipython3`` script, depending on the version of
237 237 Python it is built for. Using a universal wheel would prevent this.
238 238
239 239 Check the shasum of files with::
240 240
241 241 shasum -a 256 dist/*
242 242
243 243 and take note of them; you might need them to update the conda-forge recipes.
244 244 Rerun the command and check that the hashes have not changed::
245 245
246 246 ./tools/release
247 247 shasum -a 256 dist/*
248 248
249 249 Use the following to actually upload the result of the build::
250 250
251 251 ./tools/release upload
252 252
253 253 It should post them to ``archive.ipython.org`` and to PyPI.
254 254
255 255 PyPI/Warehouse will automatically hide previous releases. If you are uploading
256 256 a non-stable version, make sure to log in to PyPI and un-hide the previous version.
257 257
258 258
259 259 8. Draft a short release announcement
260 260 -------------------------------------
261 261
262 262 The announcement should include:
263 263
264 264 - release highlights
265 265 - a link to the html version of the *What's new* section of the documentation
266 266 - a link to upgrade or installation tips (if necessary)
267 267
268 268 Post the announcement to the mailing list and/or blog, and link from Twitter.
269 269
270 270 .. note::
271 271
272 272 If you are doing an RC or Beta release, you can likely skip the next steps.
273 273
274 274 9. Update milestones on GitHub
275 275 -------------------------------
276 276
277 277 These steps will bring milestones up to date:
278 278
279 279 - close the just released milestone
280 280 - open a new milestone for the next release (x, y+1), if the milestone doesn't
281 281 exist already
282 282
283 283 10. Update the IPython website
284 284 ------------------------------
285 285
286 286 The IPython website should document the new release:
287 287
288 288 - add release announcement (news, announcements)
289 289 - update current version and download links
290 290 - update links on the documentation page (especially if a major release)
291 291
292 292 11. Update readthedocs
293 293 ----------------------
294 294
295 295 Make sure to update readthedocs and set the latest tag as stable, as well as
296 296 checking that the previous release is still building under its own tag.
297 297
298 298 12. Update the Conda-Forge feedstock
299 299 ------------------------------------
300 300
301 301 Follow the instructions on `the repository <https://github.com/conda-forge/ipython-feedstock>`_
302 302
303 303 13. Celebrate!
304 304 --------------
305 305
306 306 Celebrate the release and please thank the contributors for their work. Great
307 307 job!
308 308
309 309
310 310
311 311 Old Documentation
312 312 =================
313 313
314 314 Out-of-date documentation is still available and has been kept for archival purposes.
315 315
316 316 .. note::
317 317
318 318 Developer documentation used to be on the IPython wiki, but is now out of
319 319 date. The wiki is nonetheless still available for historical reasons: `Old IPython
320 320 GitHub Wiki <https://github.com/ipython/ipython/wiki/Dev:-Index>`_.
@@ -1,285 +1,285 b''
1 1 {
2 2 "cells": [
3 3 {
4 4 "cell_type": "markdown",
5 5 "metadata": {},
6 6 "source": [
7 7 "# A few things that work best/only at the IPython terminal or Qt console clients"
8 8 ]
9 9 },
10 10 {
11 11 "cell_type": "markdown",
12 12 "metadata": {},
13 13 "source": [
14 14 "## Running code with `%run`"
15 15 ]
16 16 },
17 17 {
18 18 "cell_type": "code",
19 19 "execution_count": 1,
20 20 "metadata": {
21 21 "collapsed": false
22 22 },
23 23 "outputs": [
24 24 {
25 25 "name": "stdout",
26 26 "output_type": "stream",
27 27 "text": [
28 28 "Writing script.py\n"
29 29 ]
30 30 }
31 31 ],
32 32 "source": [
33 33 "%%writefile script.py\n",
34 34 "x = 10\n",
35 35 "y = 20\n",
36 36 "z = x+y\n",
37 37 "print('z is: %s' % z)"
38 38 ]
39 39 },
40 40 {
41 41 "cell_type": "code",
42 42 "execution_count": 2,
43 43 "metadata": {
44 44 "collapsed": false
45 45 },
46 46 "outputs": [
47 47 {
48 48 "name": "stdout",
49 49 "output_type": "stream",
50 50 "text": [
51 51 "z is: 30\n"
52 52 ]
53 53 }
54 54 ],
55 55 "source": [
56 56 "%run script"
57 57 ]
58 58 },
59 59 {
60 60 "cell_type": "code",
61 61 "execution_count": 3,
62 62 "metadata": {
63 63 "collapsed": false
64 64 },
65 65 "outputs": [
66 66 {
67 67 "data": {
68 68 "text/plain": [
69 69 "10"
70 70 ]
71 71 },
72 72 "execution_count": 3,
73 73 "metadata": {},
74 74 "output_type": "execute_result"
75 75 }
76 76 ],
77 77 "source": [
78 78 "x"
79 79 ]
80 80 },
81 81 {
82 82 "cell_type": "markdown",
83 83 "metadata": {},
84 84 "source": [
85 85 "## Event loop and GUI integration"
86 86 ]
87 87 },
88 88 {
89 89 "cell_type": "markdown",
90 90 "metadata": {},
91 91 "source": [
92 92 "The `%gui` magic enables the integration of GUI event loops with the interactive execution loop, allowing you to run GUI code without blocking IPython.\n",
93 93 "\n",
94 94 "Consider for example the execution of Qt-based code. Once we enable the Qt gui support:"
95 95 ]
96 96 },
97 97 {
98 98 "cell_type": "code",
99 99 "execution_count": 4,
100 100 "metadata": {
101 101 "collapsed": false
102 102 },
103 103 "outputs": [],
104 104 "source": [
105 105 "%gui qt"
106 106 ]
107 107 },
108 108 {
109 109 "cell_type": "markdown",
110 110 "metadata": {},
111 111 "source": [
112 112 "We can define a simple Qt application class (simplified version from [this Qt tutorial](http://zetcode.com/tutorials/pyqt4/firstprograms)):"
113 113 ]
114 114 },
115 115 {
116 116 "cell_type": "code",
117 117 "execution_count": 5,
118 118 "metadata": {
119 119 "collapsed": false
120 120 },
121 121 "outputs": [],
122 122 "source": [
123 123 "import sys\n",
124 124 "from PyQt4 import QtGui, QtCore\n",
125 125 "\n",
126 126 "class SimpleWindow(QtGui.QWidget):\n",
127 127 " def __init__(self, parent=None):\n",
128 128 " QtGui.QWidget.__init__(self, parent)\n",
129 129 "\n",
130 130 " self.setGeometry(300, 300, 200, 80)\n",
131 131 " self.setWindowTitle('Hello World')\n",
132 132 "\n",
133 133 " quit = QtGui.QPushButton('Close', self)\n",
134 134 " quit.setGeometry(10, 10, 60, 35)\n",
135 135 "\n",
136 136 " self.connect(quit, QtCore.SIGNAL('clicked()'),\n",
137 137 " self, QtCore.SLOT('close()'))"
138 138 ]
139 139 },
140 140 {
141 141 "cell_type": "markdown",
142 142 "metadata": {},
143 143 "source": [
144 144 "And now we can instantiate it:"
145 145 ]
146 146 },
147 147 {
148 148 "cell_type": "code",
149 149 "execution_count": 6,
150 150 "metadata": {
151 151 "collapsed": false
152 152 },
153 153 "outputs": [],
154 154 "source": [
155 155 "app = QtCore.QCoreApplication.instance()\n",
156 156 "if app is None:\n",
157 157 " app = QtGui.QApplication([])\n",
158 158 "\n",
159 159 "sw = SimpleWindow()\n",
160 160 "sw.show()\n",
161 161 "\n",
162 162 "from IPython.lib.guisupport import start_event_loop_qt4\n",
163 163 "start_event_loop_qt4(app)"
164 164 ]
165 165 },
166 166 {
167 167 "cell_type": "markdown",
168 168 "metadata": {},
169 169 "source": [
170 170 "But IPython still remains responsive:"
171 171 ]
172 172 },
173 173 {
174 174 "cell_type": "code",
175 175 "execution_count": 7,
176 176 "metadata": {
177 177 "collapsed": false
178 178 },
179 179 "outputs": [
180 180 {
181 181 "data": {
182 182 "text/plain": [
183 183 "12"
184 184 ]
185 185 },
186 186 "execution_count": 7,
187 187 "metadata": {},
188 188 "output_type": "execute_result"
189 189 }
190 190 ],
191 191 "source": [
192 192 "10+2"
193 193 ]
194 194 },
195 195 {
196 196 "cell_type": "markdown",
197 197 "metadata": {},
198 198 "source": [
199 "The `%gui` magic can be similarly used to control Wx, Tk, glut and pyglet applications, [as can be seen in our examples](https://github.com/ipython/ipython/tree/master/examples/lib)."
199 "The `%gui` magic can be similarly used to control Wx, Tk, glut and pyglet applications, [as can be seen in our examples](https://github.com/ipython/ipython/tree/main/examples/lib)."
200 200 ]
201 201 },
202 202 {
203 203 "cell_type": "markdown",
204 204 "metadata": {},
205 205 "source": [
206 206 "## Embedding IPython in a terminal application"
207 207 ]
208 208 },
209 209 {
210 210 "cell_type": "code",
211 211 "execution_count": 8,
212 212 "metadata": {
213 213 "collapsed": false
214 214 },
215 215 "outputs": [
216 216 {
217 217 "name": "stdout",
218 218 "output_type": "stream",
219 219 "text": [
220 220 "Writing simple-embed.py\n"
221 221 ]
222 222 }
223 223 ],
224 224 "source": [
225 225 "%%writefile simple-embed.py\n",
226 226 "# This shows how to use the new top-level embed function. It is a simpler\n",
227 227 "# API that manages the creation of the embedded shell.\n",
228 228 "\n",
229 229 "from IPython import embed\n",
230 230 "\n",
231 231 "a = 10\n",
232 232 "b = 20\n",
233 233 "\n",
234 234 "embed(header='First time', banner1='')\n",
235 235 "\n",
236 236 "c = 30\n",
237 237 "d = 40\n",
238 238 "\n",
239 239 "embed(header='The second time')"
240 240 ]
241 241 },
242 242 {
243 243 "cell_type": "markdown",
244 244 "metadata": {},
245 245 "source": [
246 246 "The example in kernel-embedding shows how to embed a full kernel into an application and how to connect to this kernel from an external process."
247 247 ]
248 248 },
249 249 {
250 250 "cell_type": "markdown",
251 251 "metadata": {},
252 252 "source": [
253 253 "## Logging terminal sessions and transitioning to a notebook"
254 254 ]
255 255 },
256 256 {
257 257 "cell_type": "markdown",
258 258 "metadata": {},
259 259 "source": [
260 260 "The `%logstart` magic lets you log a terminal session with various degrees of control, and the `%notebook` one will convert an interactive console session into a notebook with all input cells already created for you (but no output)."
261 261 ]
262 262 }
263 263 ],
264 264 "metadata": {
265 265 "kernelspec": {
266 266 "display_name": "Python 3",
267 267 "language": "python",
268 268 "name": "python3"
269 269 },
270 270 "language_info": {
271 271 "codemirror_mode": {
272 272 "name": "ipython",
273 273 "version": 3
274 274 },
275 275 "file_extension": ".py",
276 276 "mimetype": "text/x-python",
277 277 "name": "python",
278 278 "nbconvert_exporter": "python",
279 279 "pygments_lexer": "ipython3",
280 280 "version": "3.4.2"
281 281 }
282 282 },
283 283 "nbformat": 4,
284 284 "nbformat_minor": 0
285 285 }
@@ -1,148 +1,148 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Setup script for IPython.
3 3
4 4 Under Posix environments it works like a typical setup.py script.
5 5 Under Windows, the command sdist is not supported, since IPython
6 6 requires utilities which are not available under Windows."""
7 7
8 8 #-----------------------------------------------------------------------------
9 9 # Copyright (c) 2008-2011, IPython Development Team.
10 10 # Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
11 11 # Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
12 12 # Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
13 13 #
14 14 # Distributed under the terms of the Modified BSD License.
15 15 #
16 16 # The full license is in the file COPYING.rst, distributed with this software.
17 17 #-----------------------------------------------------------------------------
18 18
19 19 import os
20 20 import sys
21 21
22 22 # **Python version check**
23 23 #
24 24 # This check is also made in IPython/__init__, don't forget to update both when
25 25 # changing Python version requirements.
26 26 if sys.version_info < (3, 8):
27 27 pip_message = 'This may be due to an out of date pip. Make sure you have pip >= 9.0.1.'
28 28 try:
29 29 import pip
30 30 pip_version = tuple([int(x) for x in pip.__version__.split('.')[:3]])
31 31 if pip_version < (9, 0, 1) :
32 32 pip_message = 'Your pip version is out of date, please install pip >= 9.0.1. '\
33 33 'pip {} detected.'.format(pip.__version__)
34 34 else:
35 35 # pip is new enough - it must be something else
36 36 pip_message = ''
37 37 except Exception:
38 38 pass
39 39
40 40
41 41 error = """
42 42 IPython 8+ supports Python 3.8 and above, following NEP 29.
43 43 When using Python 2.7, please install IPython 5.x LTS Long Term Support version.
44 44 Python 3.3 and 3.4 were supported up to IPython 6.x.
45 45 Python 3.5 was supported with IPython 7.0 to 7.9.
46 46 Python 3.6 was supported with IPython up to 7.16.
47 47 Python 3.7 was still supported with the 7.x branch.
48 48
49 49 See IPython `README.rst` file for more information:
50 50
51 https://github.com/ipython/ipython/blob/master/README.rst
51 https://github.com/ipython/ipython/blob/main/README.rst
52 52
53 53 Python {py} detected.
54 54 {pip}
55 55 """.format(
56 56 py=sys.version_info, pip=pip_message
57 57 )
58 58
59 59 print(error, file=sys.stderr)
60 60 sys.exit(1)
61 61
62 62 # At least we're on the python version we need, move on.
63 63
64 64 from setuptools import setup
65 65
66 66 # Our own imports
67 67 sys.path.insert(0, ".")
68 68
69 69 from setupbase import target_update
70 70
71 71 from setupbase import (
72 72 setup_args,
73 73 check_package_data_first,
74 74 find_data_files,
75 75 git_prebuild,
76 76 install_symlinked,
77 77 install_lib_symlink,
78 78 install_scripts_for_symlink,
79 79 unsymlink,
80 80 )
81 81
82 82 #-------------------------------------------------------------------------------
83 83 # Handle OS specific things
84 84 #-------------------------------------------------------------------------------
85 85
86 86 if os.name in ('nt','dos'):
87 87 os_name = 'windows'
88 88 else:
89 89 os_name = os.name
90 90
91 91 # Under Windows, 'sdist' has not been supported. Now that the docs build with
92 92 # Sphinx it might work, but let's not turn it on until someone confirms that it
93 93 # actually works.
94 94 if os_name == 'windows' and 'sdist' in sys.argv:
95 95 print('The sdist command is not available under Windows. Exiting.')
96 96 sys.exit(1)
97 97
98 98
99 99 #-------------------------------------------------------------------------------
100 100 # Things related to the IPython documentation
101 101 #-------------------------------------------------------------------------------
102 102
103 103 # update the manuals when building a source dist
104 104 if len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):
105 105
106 106 # List of things to be updated. Each entry is a triplet of args for
107 107 # target_update()
108 108 to_update = [
109 109 (
110 110 "docs/man/ipython.1.gz",
111 111 ["docs/man/ipython.1"],
112 112 "cd docs/man && python -m gzip --best ipython.1",
113 113 ),
114 114 ]
115 115
116 116
117 117 [ target_update(*t) for t in to_update ]
118 118
119 119 #---------------------------------------------------------------------------
120 120 # Find all the packages, package data, and data_files
121 121 #---------------------------------------------------------------------------
122 122
123 123 data_files = find_data_files()
124 124
125 125 setup_args['data_files'] = data_files
126 126
127 127 #---------------------------------------------------------------------------
128 128 # custom distutils commands
129 129 #---------------------------------------------------------------------------
130 130 # imports here, so they are after setuptools import if there was one
131 131 from setuptools.command.sdist import sdist
132 132
133 133 setup_args['cmdclass'] = {
134 134 'build_py': \
135 135 check_package_data_first(git_prebuild('IPython')),
136 136 'sdist' : git_prebuild('IPython', sdist),
137 137 'symlink': install_symlinked,
138 138 'install_lib_symlink': install_lib_symlink,
139 139 'install_scripts_sym': install_scripts_for_symlink,
140 140 'unsymlink': unsymlink,
141 141 }
142 142
143 143 #---------------------------------------------------------------------------
144 144 # Do the actual setup now
145 145 #---------------------------------------------------------------------------
146 146
147 147 if __name__ == "__main__":
148 148 setup(**setup_args)
@@ -1,230 +1,230 b''
1 1 #!/usr/bin/env python
2 2 """Simple tools to query github.com and gather stats about issues.
3 3
4 4 To generate a report for IPython 2.0, run:
5 5
6 6 python github_stats.py --milestone 2.0 --since-tag rel-1.0.0
7 7 """
8 8 #-----------------------------------------------------------------------------
9 9 # Imports
10 10 #-----------------------------------------------------------------------------
11 11
12 12
13 13 import sys
14 14
15 15 from argparse import ArgumentParser
16 16 from datetime import datetime, timedelta
17 17 from subprocess import check_output
18 18
19 19 from gh_api import (
20 20 get_paged_request, make_auth_header, get_pull_request, is_pull_request,
21 21 get_milestone_id, get_issues_list, get_authors,
22 22 )
23 23 #-----------------------------------------------------------------------------
24 24 # Globals
25 25 #-----------------------------------------------------------------------------
26 26
27 27 ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
28 28 PER_PAGE = 100
29 29
30 30 #-----------------------------------------------------------------------------
31 31 # Functions
32 32 #-----------------------------------------------------------------------------
33 33
34 34 def round_hour(dt):
35 35 return dt.replace(minute=0,second=0,microsecond=0)
36 36
37 37 def _parse_datetime(s):
38 38 """Parse dates in the format returned by the Github API."""
39 39 if s:
40 40 return datetime.strptime(s, ISO8601)
41 41 else:
42 42 return datetime.fromtimestamp(0)
43 43
44 44 def issues2dict(issues):
45 45 """Convert a list of issues to a dict, keyed by issue number."""
46 46 idict = {}
47 47 for i in issues:
48 48 idict[i['number']] = i
49 49 return idict
50 50
51 51 def split_pulls(all_issues, project="ipython/ipython"):
52 52 """split a list of closed issues into non-PR Issues and Pull Requests"""
53 53 pulls = []
54 54 issues = []
55 55 for i in all_issues:
56 56 if is_pull_request(i):
57 57 pull = get_pull_request(project, i['number'], auth=True)
58 58 pulls.append(pull)
59 59 else:
60 60 issues.append(i)
61 61 return issues, pulls
62 62
63 63
64 64 def issues_closed_since(period=timedelta(days=365), project="ipython/ipython", pulls=False):
65 65 """Get all issues closed since a particular point in time. period
66 66 can either be a datetime object, or a timedelta object. In the
67 67 latter case, it is used as a time before the present.
68 68 """
69 69
70 70 which = 'pulls' if pulls else 'issues'
71 71
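# `period` may be an absolute datetime, or a timedelta measured back from now (rounded to the hour)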
72 72 if isinstance(period, timedelta):
73 73 since = round_hour(datetime.utcnow() - period)
74 74 else:
75 75 since = period
76 76 url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, since.strftime(ISO8601), PER_PAGE)
77 77 allclosed = get_paged_request(url, headers=make_auth_header())
78 78
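# the `since` query parameter filters on *update* time, so re-filter on the actual closed/merged dates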
79 79 filtered = [ i for i in allclosed if _parse_datetime(i['closed_at']) > since ]
80 80 if pulls:
81 81 filtered = [ i for i in filtered if _parse_datetime(i['merged_at']) > since ]
82 # filter out PRs not against master (backports)
83 filtered = [ i for i in filtered if i['base']['ref'] == 'master' ]
82 # filter out PRs not against main (backports)
83 filtered = [i for i in filtered if i["base"]["ref"] == "main"]
84 84 else:
85 85 filtered = [ i for i in filtered if not is_pull_request(i) ]
86 86
87 87 return filtered
88 88
89 89
90 90 def sorted_by_field(issues, field='closed_at', reverse=False):
91 91 """Return a list of issues sorted by closing date date."""
92 92 return sorted(issues, key = lambda i:i[field], reverse=reverse)
93 93
94 94
95 95 def report(issues, show_urls=False):
96 96 """Summary report about a list of issues, printing number and title."""
97 97 if show_urls:
98 98 for i in issues:
99 99 role = 'ghpull' if 'merged_at' in i else 'ghissue'
100 100 print(u'* :%s:`%d`: %s' % (role, i['number'],
101 101 i['title'].replace(u'`', u'``')))
102 102 else:
103 103 for i in issues:
104 104 print(u'* %d: %s' % (i['number'], i['title'].replace(u'`', u'``')))
105 105
106 106 #-----------------------------------------------------------------------------
107 107 # Main script
108 108 #-----------------------------------------------------------------------------
109 109
110 110 if __name__ == "__main__":
111 111
112 112 print("DEPRECATE: backport_pr.py is deprecated and it is now recommended"
113 113 "to install `ghpro` from PyPI.", file=sys.stderr)
114 114
115 115
116 116 # Whether to add reST urls for all issues in printout.
117 117 show_urls = True
118 118
119 119 parser = ArgumentParser()
120 120 parser.add_argument('--since-tag', type=str,
121 121 help="The git tag to use for the starting point (typically the last major release)."
122 122 )
123 123 parser.add_argument('--milestone', type=str,
124 124 help="The GitHub milestone to use for filtering issues [optional]."
125 125 )
126 126 parser.add_argument('--days', type=int,
127 127 help="The number of days of data to summarize (use this or --since-tag)."
128 128 )
129 129 parser.add_argument('--project', type=str, default="ipython/ipython",
130 130 help="The project to summarize."
131 131 )
132 132 parser.add_argument('--links', action='store_true', default=False,
133 133 help="Include links to all closed Issues and PRs in the output."
134 134 )
135 135
136 136 opts = parser.parse_args()
137 137 tag = opts.since_tag
138 138
139 139 # set `since` from days or git tag
140 140 if opts.days:
141 141 since = datetime.utcnow() - timedelta(days=opts.days)
142 142 else:
143 143 if not tag:
144 144 tag = check_output(['git', 'describe', '--abbrev=0']).strip().decode('utf8')
145 145 cmd = ['git', 'log', '-1', '--format=%ai', tag]
146 146 tagday, tz = check_output(cmd).strip().decode('utf8').rsplit(' ', 1)
147 147 since = datetime.strptime(tagday, "%Y-%m-%d %H:%M:%S")
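# shift the tag's local timestamp to UTC using its +HHMM/-HHMM offset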
148 148 h = int(tz[1:3])
149 149 m = int(tz[3:])
150 150 td = timedelta(hours=h, minutes=m)
151 151 if tz[0] == '-':
152 152 since += td
153 153 else:
154 154 since -= td
155 155
156 156 since = round_hour(since)
157 157
158 158 milestone = opts.milestone
159 159 project = opts.project
160 160
161 161 print("fetching GitHub stats since %s (tag: %s, milestone: %s)" % (since, tag, milestone), file=sys.stderr)
162 162 if milestone:
163 163 milestone_id = get_milestone_id(project=project, milestone=milestone,
164 164 auth=True)
165 165 issues_and_pulls = get_issues_list(project=project,
166 166 milestone=milestone_id,
167 167 state='closed',
168 168 auth=True,
169 169 )
170 170 issues, pulls = split_pulls(issues_and_pulls, project=project)
171 171 else:
172 172 issues = issues_closed_since(since, project=project, pulls=False)
173 173 pulls = issues_closed_since(since, project=project, pulls=True)
174 174
175 175 # For regular reports, it's nice to show them in reverse chronological order
176 176 issues = sorted_by_field(issues, reverse=True)
177 177 pulls = sorted_by_field(pulls, reverse=True)
178 178
179 179 n_issues, n_pulls = map(len, (issues, pulls))
180 180 n_total = n_issues + n_pulls
181 181
182 182 # Print summary report we can directly include into release notes.
183 183
184 184 print()
185 185 since_day = since.strftime("%Y/%m/%d")
186 186 today = datetime.today().strftime("%Y/%m/%d")
187 187 print("GitHub stats for %s - %s (tag: %s)" % (since_day, today, tag))
188 188 print()
189 189 print("These lists are automatically generated, and may be incomplete or contain duplicates.")
190 190 print()
191 191
192 192 ncommits = 0
193 193 all_authors = []
194 194 if tag:
195 195 # print git info, in addition to GitHub info:
196 196 since_tag = tag+'..'
197 197 cmd = ['git', 'log', '--oneline', since_tag]
198 198 ncommits += len(check_output(cmd).splitlines())
199 199
200 200 author_cmd = ['git', 'log', '--use-mailmap', "--format=* %aN", since_tag]
201 201 all_authors.extend(check_output(author_cmd).decode('utf-8', 'replace').splitlines())
202 202
203 203 pr_authors = []
204 204 for pr in pulls:
205 205 pr_authors.extend(get_authors(pr))
206 206 ncommits = len(pr_authors) + ncommits - len(pulls)
207 207 author_cmd = ['git', 'check-mailmap'] + pr_authors
208 208 with_email = check_output(author_cmd).decode('utf-8', 'replace').splitlines()
209 209 all_authors.extend([ u'* ' + a.split(' <')[0] for a in with_email ])
210 210 unique_authors = sorted(set(all_authors), key=lambda s: s.lower())
211 211
212 212 print("We closed %d issues and merged %d pull requests." % (n_issues, n_pulls))
213 213 if milestone:
214 214 print("The full list can be seen `on GitHub <https://github.com/{project}/issues?q=milestone%3A{milestone}>`__".format(project=project,milestone=milestone)
215 215 )
216 216
217 217 print()
218 218 print("The following %i authors contributed %i commits." % (len(unique_authors), ncommits))
219 219 print()
220 220 print('\n'.join(unique_authors))
221 221
222 222 if opts.links:
223 223 print()
224 224 print("GitHub issues and pull requests:")
225 225 print()
226 226 print('Pull Requests (%d):\n' % n_pulls)
227 227 report(pulls, show_urls)
228 228 print()
229 229 print('Issues (%d):\n' % n_issues)
230 230 report(issues, show_urls)
@@ -1,251 +1,251 b''
1 1 # Simple tool to help with releases.
2 2 # When releasing with bash, simply source it and answer the questions as prompted.
3 3
4 4 # misc checks before starting
5 5
6 6 python -c 'import keyring'
7 7 python -c 'import twine'
8 8 python -c 'import sphinx'
9 9 python -c 'import sphinx_rtd_theme'
10 10 python -c 'import pytest'
11 11
12 12
13 13 BLACK=$(tput setaf 0)
14 14 RED=$(tput setaf 1)
15 15 GREEN=$(tput setaf 2)
16 16 YELLOW=$(tput setaf 3)
17 17 BLUE=$(tput setaf 4)
18 18 MAGENTA=$(tput setaf 5)
19 19 CYAN=$(tput setaf 6)
20 20 WHITE=$(tput setaf 7)
21 21 NOR=$(tput sgr0)
22 22
23 23
24 24 echo "Will use $BLUE'$EDITOR'$NOR to edit files when necessary"
25 25 echo -n "PREV_RELEASE (X.y.z) [$PREV_RELEASE]: "
26 26 read input
27 27 PREV_RELEASE=${input:-$PREV_RELEASE}
28 28 echo -n "MILESTONE (X.y) [$MILESTONE]: "
29 29 read input
30 30 MILESTONE=${input:-$MILESTONE}
31 31 echo -n "VERSION (X.y.z) [$VERSION]:"
32 32 read input
33 33 VERSION=${input:-$VERSION}
34 echo -n "BRANCH (master|X.y) [$BRANCH]:"
34 echo -n "BRANCH (main|X.y) [$BRANCH]:"
35 35 read input
36 36 BRANCH=${input:-$BRANCH}
37 37
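# ask_section <title>: print the title and read one key; returns 0 (run the section) on Enter or 'y', non-zero to skip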
38 38 ask_section(){
39 39 echo
40 40 echo $BLUE"$1"$NOR
41 41 echo -n $GREEN"Press Enter to continue, S to skip: "$NOR
42 42 if [ "$ZSH_NAME" = "zsh" ] ; then
43 43 read -k1 value
44 44 value=${value%$'\n'}
45 45 else
46 46 read -n1 value
47 47 fi
48 48 if [ -z "$value" ] || [ $value = 'y' ]; then
49 49 return 0
50 50 fi
51 51 return 1
52 52 }
53 53
54 54
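# maybe_edit <file>: offer to open <file> in $EDITOR when 'e' is pressed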
55 55 maybe_edit(){
56 56 echo
57 57 echo $BLUE"$1"$NOR
58 58 echo -n $GREEN"Press ${BLUE}e$GREEN to Edit ${BLUE}$1$GREEN, any other keys to skip: "$NOR
59 59 if [ "$ZSH_NAME" = "zsh" ] ; then
60 60 read -k1 value
61 61 value=${value%$'\n'}
62 62 else
63 63 read -n1 value
64 64 fi
65 65
66 66 echo
67 67 if [ $value = 'e' ] ; then
68 68 $=EDITOR $1
69 69 fi
70 70 }
71 71
72 72
73 73
74 74 echo
75 75 if ask_section "Updating what's new with information from docs/source/whatsnew/pr"
76 76 then
77 77 python tools/update_whatsnew.py
78 78
79 79 echo
80 80 echo $BLUE"please move the contents of "docs/source/whatsnew/development.rst" to version-X.rst"$NOR
81 81 echo $GREEN"Press enter to continue"$NOR
82 82 read
83 83 fi
84 84
85 85 if ask_section "Gen Stats, and authors"
86 86 then
87 87
88 88 echo
89 89 echo $BLUE"here are all the authors that contributed to this release:"$NOR
90 90 git log --format="%aN <%aE>" $PREV_RELEASE... | sort -u -f
91 91
92 92 echo
93 93 echo $BLUE"If you see any duplicates cancel (Ctrl-C), then edit .mailmap."
94 94 echo $GREEN"Press enter to continue:"$NOR
95 95 read
96 96
97 97 echo $BLUE"generating stats"$NOR
98 98 python tools/github_stats.py --milestone $MILESTONE > stats.rst
99 99
100 100 echo $BLUE"stats.rst files generated."$NOR
101 101 echo $GREEN"Please merge it with the right file (github-stats-X.rst) and commit."$NOR
102 102 echo $GREEN"press enter to continue."$NOR
103 103 read
104 104
105 105 fi
106 106
107 107 if ask_section "Generate API difference (using frapuccino)"
108 108 then
109 109 echo $BLUE"Checking out $PREV_RELEASE"$NOR
110 110 git checkout $PREV_RELEASE
111 111 sleep 1
112 112 echo $BLUE"Saving API to file $PREV_RELEASE"$NOR
113 113 frappuccino IPython IPython.kernel IPython.lib IPython.qt IPython.lib.kernel IPython.html IPython.frontend IPython.external --save IPython-$PREV_RELEASE.json
114 114 echo $BLUE"coming back to $BRANCH"$NOR
115 115 git checkout $BRANCH
116 116 sleep 1
117 117 echo $BLUE"comparing ..."$NOR
118 118 frappuccino IPython IPython.kernel IPython.lib --compare IPython-$PREV_RELEASE.json
119 119 echo $GREEN"Use the above guideline to write an API changelog ..."$NOR
120 120 echo $GREEN"Press any keys to continue"$NOR
121 121 read
122 122 fi
123 123
124 124 echo "Cleaning repository"
125 125 git clean -xfdi
126 126
127 127 echo $GREEN"please update version number in ${RED}IPython/core/release.py${NOR} , Do not commit yet – we'll do it later."$NOR
128 128 echo $GREEN"I tried ${RED}sed -i bkp -e '/Uncomment/s/^# //g' IPython/core/release.py${NOR}"
129 129 sed -i bkp -e '/Uncomment/s/^# //g' IPython/core/release.py
130 130 rm IPython/core/release.pybkp
131 131 git diff | cat
132 132 maybe_edit IPython/core/release.py
133 133
134 134 echo $GREEN"Press enter to continue"$NOR
135 135 read
136 136
137 137 if ask_section "Build the documentation ?"
138 138 then
139 139 make html -C docs
140 140 echo
141 141 echo $GREEN"Check the docs, press enter to continue"$NOR
142 142 read
143 143
144 144 fi
145 145
146 146 if ask_section "Should we commit, tag, push... etc ? "
147 147 then
148 148 echo
149 149 echo $BLUE"Let's commit : git commit -am \"release $VERSION\" -S"
150 150 echo $GREEN"Press enter to commit"$NOR
151 151 read
152 152 git commit -am "release $VERSION" -S
153 153
154 154 echo
155 155 echo $BLUE"git push origin \$BRANCH ($BRANCH)?"$NOR
156 156 echo $GREEN"Make sure you can push"$NOR
157 157 echo $GREEN"Press enter to continue"$NOR
158 158 read
159 159 git push origin $BRANCH
160 160
161 161 echo
162 162 echo "Let's tag : git tag -am \"release $VERSION\" \"$VERSION\" -s"
163 163 echo $GREEN"Press enter to tag commit"$NOR
164 164 read
165 165 git tag -am "release $VERSION" "$VERSION" -s
166 166
167 167 echo
168 168 echo $BLUE"And push the tag: git push origin \$VERSION ?"$NOR
169 169 echo $GREEN"Press enter to continue"$NOR
170 170 read
171 171 git push origin $VERSION
172 172
173 173
174 174 echo $GREEN"please update version number and back to .dev in ${RED}IPython/core/release.py"
175 175 echo $GREEN"I tried ${RED}sed -i bkp -e '/Uncomment/s/^/# /g' IPython/core/release.py${NOR}"
176 176 sed -i bkp -e '/Uncomment/s/^/# /g' IPython/core/release.py
177 177 rm IPython/core/release.pybkp
178 178 git diff | cat
179 179 echo $GREEN"Please bump ${RED}the minor version number${NOR}"
180 180 maybe_edit IPython/core/release.py
181 181 echo ${BLUE}"Do not commit yet – we'll do it later."$NOR
182 182
183 183
184 184 echo $GREEN"Press enter to continue"$NOR
185 185 read
186 186
187 187 echo
188 188 echo "Let's commit : "$BLUE"git commit -am \"back to dev\""$NOR
189 189 echo $GREEN"Press enter to commit"$NOR
190 190 read
191 191 git commit -am "back to dev"
192 192
193 193 echo
194 194 echo $BLUE"git push origin \$BRANCH ($BRANCH)?"$NOR
195 195 echo $GREEN"Press enter to continue"$NOR
196 196 read
197 197 git push origin $BRANCH
198 198
199 199
200 200 echo
201 201 echo $BLUE"let's : git checkout $VERSION"$NOR
202 202 echo $GREEN"Press enter to continue"$NOR
203 203 read
204 204 git checkout $VERSION
205 205 fi
206 206
207 207 if ask_section "Should we build and release ?"
208 208 then
209 209
210 210 echo $BLUE"going to set SOURCE_DATE_EPOCH"$NOR
211 211 echo $BLUE'export SOURCE_DATE_EPOCH=$(git show -s --format=%ct HEAD)'$NOR
212 212 echo $GREEN"Press enter to continue"$NOR
213 213 read
214 214
215 215 export SOURCE_DATE_EPOCH=$(git show -s --format=%ct HEAD)
216 216
217 217 echo $BLUE"SOURCE_DATE_EPOCH set to $SOURCE_DATE_EPOCH"$NOR
218 218 echo $GREEN"Press enter to continue"$NOR
219 219 read
220 220
221 221
222 222
223 223 echo
224 224 echo $BLUE"Attempting to build package..."$NOR
225 225
226 226 tools/release
227 227
228 228
229 229 echo $RED'$ shasum -a 256 dist/*'
230 230 shasum -a 256 dist/*
231 231 echo $NOR
232 232
233 233 echo $BLUE"We are going to rebuild, node the hash above, and compare them to the rebuild"$NOR
234 234 echo $GREEN"Press enter to continue"$NOR
235 235 read
236 236
237 237 echo
238 238 echo $BLUE"Attempting to build package..."$NOR
239 239
240 240 tools/release
241 241
242 242 echo $RED"Check the shasum for SOURCE_DATE_EPOCH=$SOURCE_DATE_EPOCH"
243 243 echo $RED'$ shasum -a 256 dist/*'
244 244 shasum -a 256 dist/*
245 245 echo $NOR
246 246
247 247 if ask_section "upload packages ?"
248 248 then
249 249 tools/release upload
250 250 fi
251 251 fi
@@ -1,85 +1,87 b''
1 1 """
2 2 Un-targz and retargz a targz file to ensure reproducible build.
3 3
4 4 usage:
5 5
6 6 $ export SOURCE_DATE_EPOCH=$(date +%s)
7 # or
8 $ export SOURCE_DATE_EPOCH=$(git show -s --format=%ct HEAD)
7 9 ...
8 10 $ python retar.py <tarfile.gz>
9 11
10 12 The process of creating an sdist can be non-reproducible:
11 13 - directories created during the process get an mtime of the creation date;
12 14 - gzipping embeds the timestamp of when the compression was done.
13 15
14 16 This will untar and retar the archive, ensuring that any mtime > SOURCE_DATE_EPOCH
15 17 is set equal to SOURCE_DATE_EPOCH.
16 18
17 19 """
18 20
19 21 import tarfile
20 22 import sys
21 23 import os
22 24 import gzip
23 25 import io
24 26
25 27 from pathlib import Path
26 28
27 29 if len(sys.argv) > 2:
28 30 raise ValueError("Too many arguments")
29 31
30 32
31 33 timestamp = int(os.environ["SOURCE_DATE_EPOCH"])
32 34
33 35 path = Path(sys.argv[1])
34 36 old_buf = io.BytesIO()
35 37 with open(path, "rb") as f:
36 38 old_buf.write(f.read())
37 39 old_buf.seek(0)
38 40 if path.name.endswith("gz"):
39 41 r_mode = "r:gz"
40 42 if path.name.endswith("bz2"):
41 43 r_mode = "r:bz2"
42 44 if path.name.endswith("xz"):
43 45 raise ValueError("XZ is deprecated but it's written nowhere")
44 46 old = tarfile.open(fileobj=old_buf, mode=r_mode)
45 47
46 48 buf = io.BytesIO()
47 49 new = tarfile.open(fileobj=buf, mode="w", format=tarfile.GNU_FORMAT)
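# copy every member into a fresh GNU-format archive, dropping .DS_Store files and clamping mtimes to SOURCE_DATE_EPOCH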
48 50 for i, m in enumerate(old):
49 51 data = None
50 52 # mutating the original TarInfo in place does not work; copy its fields into a new one
51 53 if m.name.endswith('.DS_Store'):
52 54 continue
53 55 m2 = tarfile.TarInfo(m.name)
54 56 m2.mtime = min(timestamp, m.mtime)
55 57 m2.pax_headers["mtime"] = m2.mtime
56 58 m2.size = m.size
57 59 m2.type = m.type
58 60 m2.linkname = m.linkname
59 61 m2.mode = m.mode
60 62 if m.isdir():
61 63 new.addfile(m2)
62 64 else:
63 65 data = old.extractfile(m)
64 66 new.addfile(m2, data)
65 67 new.close()
66 68 old.close()
67 69
68 70 buf.seek(0)
69 71
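# recompress; for gzip, pass mtime explicitly so the header timestamp itself stays reproducible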
70 72 if r_mode == "r:gz":
71 73 with open(path, "wb") as f:
72 74 with gzip.GzipFile("", "wb", fileobj=f, mtime=timestamp) as gzf:
73 75 gzf.write(buf.read())
74 76 elif r_mode == "r:bz2":
75 77 import bz2
76 78
77 79 with bz2.open(path, "wb") as f:
78 80 f.write(buf.read())
79 81
80 82 else:
81 83 assert False
82 84
83 85 # check that the rebuilt archive can be opened and read back.
84 86 archive = tarfile.open(path)
85 87 names = archive.getnames()
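# hypothetical follow-up (not part of the original script): reproducibility could be
# checked by re-running retar.py with the same SOURCE_DATE_EPOCH and comparing checksums, e.g.
#   SOURCE_DATE_EPOCH=$(git show -s --format=%ct HEAD) python retar.py dist/<sdist>.tar.gz
#   shasum -a 256 dist/*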