misc fixes
M Bussonnier
@@ -1,106 +1,106 @@
name: Run tests

on:
  push:
    branches:
      - main
      - '*.x'
  pull_request:
  # Run weekly on Monday at 1:23 UTC
  schedule:
    - cron: '23 1 * * 1'
  workflow_dispatch:


jobs:
  test:
    runs-on: ${{ matrix.os }}
    # Disable scheduled CI runs on forks
    if: github.event_name != 'schedule' || github.repository_owner == 'ipython'
    strategy:
      fail-fast: false
      matrix:
        os: [ubuntu-latest, windows-latest]
        python-version: ["3.11", "3.12","3.13"]
        deps: [test_extra]
        # Test all on ubuntu, test ends on macos
        include:
          - os: macos-latest
            python-version: "3.11"
            deps: test_extra
          # Tests minimal dependencies set
          - os: ubuntu-latest
            python-version: "3.11"
            deps: test
          # Tests latest development Python version
          - os: ubuntu-latest
            python-version: "3.13"
            deps: test
          # Installing optional dependencies stuff takes ages on PyPy
-         - os: ubuntu-latest
-           python-version: "pypy-3.11"
-           deps: test
-         - os: windows-latest
-           python-version: "pypy-3.11"
-           deps: test
-         - os: macos-latest
-           python-version: "pypy-3.11"
-           deps: test
+         # - os: ubuntu-latest
+         #   python-version: "pypy-3.11"
+         #   deps: test
+         # - os: windows-latest
+         #   python-version: "pypy-3.11"
+         #   deps: test
+         # - os: macos-latest
+         #   python-version: "pypy-3.11"
+         #   deps: test
          # Temporary CI run to use entry point compatible code in matplotlib-inline.
          - os: ubuntu-latest
            python-version: "3.12"
            deps: test_extra
            want-latest-entry-point-code: true

    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
          cache: pip
          cache-dependency-path: |
            pyproject.toml
      - name: Install latex
        if: runner.os == 'Linux' && matrix.deps == 'test_extra'
        run: echo "disable latex for now, issues in mirros" #sudo apt-get -yq -o Acquire::Retries=3 --no-install-suggests --no-install-recommends install texlive dvipng
      - name: Install and update Python dependencies (binary only)
        if: ${{ ! contains( matrix.python-version, 'dev' ) }}
        run: |
          python -m pip install --only-binary ':all:' --upgrade pip setuptools wheel build
          python -m pip install --only-binary ':all:' --no-binary curio --upgrade -e .[${{ matrix.deps }}]
          python -m pip install --only-binary ':all:' --upgrade check-manifest pytest-cov 'pytest<8'
      - name: Install and update Python dependencies (dev?)
        if: ${{ contains( matrix.python-version, 'dev' ) }}
        run: |
          python -m pip install --pre --upgrade pip setuptools wheel build
          python -m pip install --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple --no-binary curio --upgrade -e .[${{ matrix.deps }}]
          python -m pip install --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple --upgrade check-manifest pytest-cov
      - name: Try building with Python build
        if: runner.os != 'Windows' # setup.py does not support sdist on Windows
        run: |
          python -m build
          shasum -a 256 dist/*
      - name: Check manifest
        if: runner.os != 'Windows' # setup.py does not support sdist on Windows
        run: check-manifest

-     - name: Install entry point compatible code (TEMPORARY)
+     - name: Install entry point compatible code (TEMPORARY, April 2024)
        if: matrix.want-latest-entry-point-code
        run: |
          python -m pip list
          # Not installing matplotlib's entry point code as building matplotlib from source is complex.
          # Rely upon matplotlib to test all the latest entry point branches together.
          python -m pip install --upgrade git+https://github.com/ipython/matplotlib-inline.git@main
          python -m pip list

      - name: pytest
        env:
          COLUMNS: 120
        run: |
          pytest --color=yes -raXxs ${{ startsWith(matrix.python-version, 'pypy') && ' ' || '--cov --cov-report=xml' }} --maxfail=15
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v4
        with:
          name: Test
          files: /home/runner/work/ipython/ipython/coverage.xml
@@ -1,3379 +1,3378 @@
1 """Completion for IPython.
1 """Completion for IPython.
2
2
3 This module started as fork of the rlcompleter module in the Python standard
3 This module started as fork of the rlcompleter module in the Python standard
4 library. The original enhancements made to rlcompleter have been sent
4 library. The original enhancements made to rlcompleter have been sent
5 upstream and were accepted as of Python 2.3,
5 upstream and were accepted as of Python 2.3,
6
6
7 This module now support a wide variety of completion mechanism both available
7 This module now support a wide variety of completion mechanism both available
8 for normal classic Python code, as well as completer for IPython specific
8 for normal classic Python code, as well as completer for IPython specific
9 Syntax like magics.
9 Syntax like magics.
10
10
11 Latex and Unicode completion
11 Latex and Unicode completion
12 ============================
12 ============================
13
13
14 IPython and compatible frontends not only can complete your code, but can help
14 IPython and compatible frontends not only can complete your code, but can help
15 you to input a wide range of characters. In particular we allow you to insert
15 you to input a wide range of characters. In particular we allow you to insert
16 a unicode character using the tab completion mechanism.
16 a unicode character using the tab completion mechanism.

Forward latex/unicode completion
--------------------------------

Forward completion allows you to easily type a unicode character using its latex
name, or its unicode long description. To do so, type a backslash followed by the
relevant name and press tab:


Using latex completion:

.. code::

    \\alpha<tab>
    α

or using unicode completion:


.. code::

    \\GREEK SMALL LETTER ALPHA<tab>
    α


Only valid Python identifiers will complete. Combining characters (like arrows or
dots) are also available; unlike latex they need to be put after their
counterpart, that is to say, ``F\\\\vec<tab>`` is correct, not ``\\\\vec<tab>F``.

Some browsers are known to display combining characters incorrectly.
48 Backward latex completion
48 Backward latex completion
49 -------------------------
49 -------------------------
50
50
51 It is sometime challenging to know how to type a character, if you are using
51 It is sometime challenging to know how to type a character, if you are using
52 IPython, or any compatible frontend you can prepend backslash to the character
52 IPython, or any compatible frontend you can prepend backslash to the character
53 and press :kbd:`Tab` to expand it to its latex form.
53 and press :kbd:`Tab` to expand it to its latex form.
54
54
55 .. code::
55 .. code::
56
56
57 \\Ξ±<tab>
57 \\Ξ±<tab>
58 \\alpha
58 \\alpha
59
59
60
60
61 Both forward and backward completions can be deactivated by setting the
61 Both forward and backward completions can be deactivated by setting the
62 :std:configtrait:`Completer.backslash_combining_completions` option to
62 :std:configtrait:`Completer.backslash_combining_completions` option to
63 ``False``.
63 ``False``.
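For instance, this option can be set from a configuration file such as
``ipython_config.py``:

.. code::

    c = get_config()
    c.Completer.backslash_combining_completions = False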

Experimental
============

Starting with IPython 6.0, this module can make use of the Jedi library to
generate completions both using static analysis of the code, and dynamically
inspecting multiple namespaces. Jedi is an autocompletion and static analysis
library for Python. The APIs attached to this new mechanism are unstable and will
raise unless used in a :any:`provisionalcompleter` context manager.

You will find that the following are experimental:

- :any:`provisionalcompleter`
- :any:`IPCompleter.completions`
- :any:`Completion`
- :any:`rectify_completions`

.. note::

    better name for :any:`rectify_completions` ?

We welcome any feedback on these new APIs, and we also encourage you to try this
module in debug mode (start IPython with ``--Completer.debug=True``) in order
to have extra logging information if :any:`jedi` is crashing, or if the current
IPython completer pending deprecations are returning results not yet handled
by :any:`jedi`.

Using Jedi for tab completion allows snippets like the following to work without
having to execute any code:

>>> myvar = ['hello', 42]
... myvar[1].bi<tab>

Tab completion will be able to infer that ``myvar[1]`` is a real number without
executing almost any code, unlike the deprecated :any:`IPCompleter.greedy`
option.
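Programmatic access to these completions goes through the provisional API, for
example (a sketch, assuming a running IPython session reachable via
``get_ipython()``):

.. code::

    from IPython.core.completer import provisionalcompleter

    ip = get_ipython()
    with provisionalcompleter():
        # completions(text, offset) yields provisional Completion objects
        completions = list(ip.Completer.completions('myvar[1].bi', 11))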

Be sure to update :any:`jedi` to the latest stable version or to try the
current development version to get better completions.

Matchers
========

All completion routines are implemented using a unified *Matchers* API.
The matchers API is provisional and subject to change without notice.

The built-in matchers include:

- :any:`IPCompleter.dict_key_matcher`: dictionary key completions,
- :any:`IPCompleter.magic_matcher`: completions for magics,
- :any:`IPCompleter.unicode_name_matcher`,
  :any:`IPCompleter.fwd_unicode_matcher`
  and :any:`IPCompleter.latex_name_matcher`: see `Forward latex/unicode completion`_,
- :any:`back_unicode_name_matcher` and :any:`back_latex_name_matcher`: see `Backward latex completion`_,
- :any:`IPCompleter.file_matcher`: paths to files and directories,
- :any:`IPCompleter.python_func_kw_matcher` - function keywords,
- :any:`IPCompleter.python_matches` - globals and attributes (v1 API),
- ``IPCompleter.jedi_matcher`` - static analysis with Jedi,
- :any:`IPCompleter.custom_completer_matcher` - pluggable completer with a default
  implementation in :any:`InteractiveShell` which uses the IPython hooks system
  (`complete_command`) with string dispatch (including regular expressions).
  Unlike other matchers, ``custom_completer_matcher`` will not suppress
  Jedi results, to match behaviour in earlier IPython versions.

Custom matchers can be added by appending to the ``IPCompleter.custom_matchers``
list, for example:
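
.. code::

    def my_matcher(text):
        # A v1-style matcher: takes the token, returns a list of strings.
        candidates = ['apple', 'apricot', 'banana']
        return [c for c in candidates if c.startswith(text)]

    get_ipython().Completer.custom_matchers.append(my_matcher)

The matcher above is only an illustration; any callable following one of the
Matcher APIs described below can be registered this way.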

Matcher API
-----------

Simplifying some details, the ``Matcher`` interface can be described as

.. code-block::

    MatcherAPIv1 = Callable[[str], list[str]]
    MatcherAPIv2 = Callable[[CompletionContext], SimpleMatcherResult]

    Matcher = MatcherAPIv1 | MatcherAPIv2

The ``MatcherAPIv1`` reflects the matcher API as available prior to IPython 8.6.0
and remains supported as the simplest way of generating completions. This is also
currently the only API supported by the IPython hooks system `complete_command`.

To distinguish between matcher versions, the ``matcher_api_version`` attribute is
used. More precisely, the API allows omitting ``matcher_api_version`` for v1
Matchers, and requires a literal ``2`` for v2 Matchers.

Once the API stabilises, future versions may relax the requirement for specifying
``matcher_api_version`` by switching to :any:`functools.singledispatch`, therefore
please do not rely on the presence of ``matcher_api_version`` for any purpose.
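As an illustration, a minimal v2 matcher can be written with the
``context_matcher`` decorator defined in this module (the matcher body below is
an arbitrary example):

.. code::

    from IPython.core.completer import (
        CompletionContext,
        SimpleCompletion,
        SimpleMatcherResult,
        context_matcher,
    )

    @context_matcher()
    def color_matcher(context: CompletionContext) -> SimpleMatcherResult:
        names = ['red', 'green', 'blue']
        completions = [
            SimpleCompletion(text=name)
            for name in names
            if name.startswith(context.token)
        ]
        return {'completions': completions}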

Suppression of competing matchers
---------------------------------

By default, results from all matchers are combined, in the order determined by
their priority. Matchers can request to suppress results from subsequent
matchers by setting ``suppress`` to ``True`` in the ``MatcherResult``.

When multiple matchers simultaneously request suppression, the results from
the matcher with the higher priority will be returned.

Sometimes it is desirable to suppress most but not all other matchers;
this can be achieved by adding a set of identifiers of matchers which
should not be suppressed to the ``MatcherResult`` under the ``do_not_suppress`` key.

The suppression behaviour is user-configurable via
:std:configtrait:`IPCompleter.suppress_competing_matchers`.
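For example, a matcher that wants its results to win over everything except the
Jedi completions could return (a sketch, reusing the identifiers listed above):

.. code::

    return {
        'completions': completions,
        'suppress': True,
        'do_not_suppress': {'IPCompleter.jedi_matcher'},
    }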
171 """
171 """
172
172
173
173
174 # Copyright (c) IPython Development Team.
174 # Copyright (c) IPython Development Team.
175 # Distributed under the terms of the Modified BSD License.
175 # Distributed under the terms of the Modified BSD License.
176 #
176 #
177 # Some of this code originated from rlcompleter in the Python standard library
177 # Some of this code originated from rlcompleter in the Python standard library
178 # Copyright (C) 2001 Python Software Foundation, www.python.org
178 # Copyright (C) 2001 Python Software Foundation, www.python.org
179
179
180 from __future__ import annotations
180 from __future__ import annotations
181 import builtins as builtin_mod
181 import builtins as builtin_mod
182 import enum
182 import enum
183 import glob
183 import glob
184 import inspect
184 import inspect
185 import itertools
185 import itertools
186 import keyword
186 import keyword
187 import os
187 import os
188 import re
188 import re
189 import string
189 import string
190 import sys
190 import sys
191 import tokenize
191 import tokenize
192 import time
192 import time
193 import unicodedata
193 import unicodedata
194 import uuid
194 import uuid
195 import warnings
195 import warnings
196 from ast import literal_eval
196 from ast import literal_eval
197 from collections import defaultdict
197 from collections import defaultdict
198 from contextlib import contextmanager
198 from contextlib import contextmanager
199 from dataclasses import dataclass
199 from dataclasses import dataclass
200 from functools import cached_property, partial
200 from functools import cached_property, partial
201 from types import SimpleNamespace
201 from types import SimpleNamespace
202 from typing import (
202 from typing import (
203 Iterable,
203 Iterable,
204 Iterator,
204 Iterator,
205 List,
205 List,
206 Tuple,
206 Tuple,
207 Union,
207 Union,
208 Any,
208 Any,
209 Sequence,
209 Sequence,
210 Dict,
210 Dict,
211 Optional,
211 Optional,
212 TYPE_CHECKING,
212 TYPE_CHECKING,
213 Set,
213 Set,
214 Sized,
214 Sized,
215 TypeVar,
215 TypeVar,
216 Literal,
216 Literal,
217 )
217 )
218
218
219 from IPython.core.guarded_eval import guarded_eval, EvaluationContext
219 from IPython.core.guarded_eval import guarded_eval, EvaluationContext
220 from IPython.core.error import TryNext
220 from IPython.core.error import TryNext
221 from IPython.core.inputtransformer2 import ESC_MAGIC
221 from IPython.core.inputtransformer2 import ESC_MAGIC
222 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
222 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
223 from IPython.core.oinspect import InspectColors
223 from IPython.core.oinspect import InspectColors
224 from IPython.testing.skipdoctest import skip_doctest
224 from IPython.testing.skipdoctest import skip_doctest
225 from IPython.utils import generics
225 from IPython.utils import generics
226 from IPython.utils.decorators import sphinx_options
226 from IPython.utils.decorators import sphinx_options
227 from IPython.utils.dir2 import dir2, get_real_method
227 from IPython.utils.dir2 import dir2, get_real_method
228 from IPython.utils.docs import GENERATING_DOCUMENTATION
228 from IPython.utils.docs import GENERATING_DOCUMENTATION
229 from IPython.utils.path import ensure_dir_exists
229 from IPython.utils.path import ensure_dir_exists
230 from IPython.utils.process import arg_split
230 from IPython.utils.process import arg_split
231 from traitlets import (
231 from traitlets import (
232 Bool,
232 Bool,
233 Enum,
233 Enum,
234 Int,
234 Int,
235 List as ListTrait,
235 List as ListTrait,
236 Unicode,
236 Unicode,
237 Dict as DictTrait,
237 Dict as DictTrait,
238 Union as UnionTrait,
238 Union as UnionTrait,
239 observe,
239 observe,
240 )
240 )
241 from traitlets.config.configurable import Configurable
241 from traitlets.config.configurable import Configurable
242
242
243 import __main__
243 import __main__
244
244
245 from typing import cast
245 from typing import cast
246
246
247 if sys.version_info < (3, 12):
247 if sys.version_info < (3, 12):
248 from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias, TypeGuard
248 from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias, TypeGuard
249 else:
249 else:
250 from typing import TypedDict, NotRequired, Protocol, TypeAlias, TypeGuard
250 from typing import TypedDict, NotRequired, Protocol, TypeAlias, TypeGuard
251
251
252
252
253 # skip module docstests
253 # skip module docstests
254 __skip_doctest__ = True
254 __skip_doctest__ = True
255
255
256
256
257 try:
257 try:
258 import jedi
258 import jedi
259 jedi.settings.case_insensitive_completion = False
259 jedi.settings.case_insensitive_completion = False
260 import jedi.api.helpers
260 import jedi.api.helpers
261 import jedi.api.classes
261 import jedi.api.classes
262 JEDI_INSTALLED = True
262 JEDI_INSTALLED = True
263 except ImportError:
263 except ImportError:
264 JEDI_INSTALLED = False
264 JEDI_INSTALLED = False
265
265
266
266
267
268 # -----------------------------------------------------------------------------
267 # -----------------------------------------------------------------------------
269 # Globals
268 # Globals
270 #-----------------------------------------------------------------------------
269 #-----------------------------------------------------------------------------
271
270
272 # ranges where we have most of the valid unicode names. We could be more finer
271 # ranges where we have most of the valid unicode names. We could be more finer
273 # grained but is it worth it for performance While unicode have character in the
272 # grained but is it worth it for performance While unicode have character in the
274 # range 0, 0x110000, we seem to have name for about 10% of those. (131808 as I
273 # range 0, 0x110000, we seem to have name for about 10% of those. (131808 as I
275 # write this). With below range we cover them all, with a density of ~67%
274 # write this). With below range we cover them all, with a density of ~67%
276 # biggest next gap we consider only adds up about 1% density and there are 600
275 # biggest next gap we consider only adds up about 1% density and there are 600
277 # gaps that would need hard coding.
276 # gaps that would need hard coding.
278 _UNICODE_RANGES = [(32, 0x323B0), (0xE0001, 0xE01F0)]
277 _UNICODE_RANGES = [(32, 0x323B0), (0xE0001, 0xE01F0)]
279
278
280 # Public API
279 # Public API
281 __all__ = ["Completer", "IPCompleter"]
280 __all__ = ["Completer", "IPCompleter"]
282
281
283 if sys.platform == 'win32':
282 if sys.platform == 'win32':
284 PROTECTABLES = ' '
283 PROTECTABLES = ' '
285 else:
284 else:
286 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
285 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
287
286
288 # Protect against returning an enormous number of completions which the frontend
287 # Protect against returning an enormous number of completions which the frontend
289 # may have trouble processing.
288 # may have trouble processing.
290 MATCHES_LIMIT = 500
289 MATCHES_LIMIT = 500
291
290
292 # Completion type reported when no type can be inferred.
291 # Completion type reported when no type can be inferred.
293 _UNKNOWN_TYPE = "<unknown>"
292 _UNKNOWN_TYPE = "<unknown>"
294
293
295 # sentinel value to signal lack of a match
294 # sentinel value to signal lack of a match
296 not_found = object()
295 not_found = object()
297
296
298 class ProvisionalCompleterWarning(FutureWarning):
297 class ProvisionalCompleterWarning(FutureWarning):
299 """
298 """
300 Exception raise by an experimental feature in this module.
299 Exception raise by an experimental feature in this module.
301
300
302 Wrap code in :any:`provisionalcompleter` context manager if you
301 Wrap code in :any:`provisionalcompleter` context manager if you
303 are certain you want to use an unstable feature.
302 are certain you want to use an unstable feature.
304 """
303 """
305 pass
304 pass
306
305
307 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
306 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
308
307
309
308
310 @skip_doctest
309 @skip_doctest
311 @contextmanager
310 @contextmanager
312 def provisionalcompleter(action='ignore'):
311 def provisionalcompleter(action='ignore'):
313 """
312 """
314 This context manager has to be used in any place where unstable completer
313 This context manager has to be used in any place where unstable completer
315 behavior and API may be called.
314 behavior and API may be called.
316
315
317 >>> with provisionalcompleter():
316 >>> with provisionalcompleter():
318 ... completer.do_experimental_things() # works
317 ... completer.do_experimental_things() # works
319
318
320 >>> completer.do_experimental_things() # raises.
319 >>> completer.do_experimental_things() # raises.
321
320
322 .. note::
321 .. note::
323
322
324 Unstable
323 Unstable
325
324
326 By using this context manager you agree that the API in use may change
325 By using this context manager you agree that the API in use may change
327 without warning, and that you won't complain if they do so.
326 without warning, and that you won't complain if they do so.
328
327
329 You also understand that, if the API is not to your liking, you should report
328 You also understand that, if the API is not to your liking, you should report
330 a bug to explain your use case upstream.
329 a bug to explain your use case upstream.
331
330
332 We'll be happy to get your feedback, feature requests, and improvements on
331 We'll be happy to get your feedback, feature requests, and improvements on
333 any of the unstable APIs!
332 any of the unstable APIs!
334 """
333 """
335 with warnings.catch_warnings():
334 with warnings.catch_warnings():
336 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
335 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
337 yield
336 yield
338
337
339
338
340 def has_open_quotes(s):
339 def has_open_quotes(s):
341 """Return whether a string has open quotes.
340 """Return whether a string has open quotes.
342
341
343 This simply counts whether the number of quote characters of either type in
342 This simply counts whether the number of quote characters of either type in
344 the string is odd.
343 the string is odd.
345
344
346 Returns
345 Returns
347 -------
346 -------
348 If there is an open quote, the quote character is returned. Else, return
347 If there is an open quote, the quote character is returned. Else, return
349 False.
348 False.
350 """
349 """
351 # We check " first, then ', so complex cases with nested quotes will get
350 # We check " first, then ', so complex cases with nested quotes will get
352 # the " to take precedence.
351 # the " to take precedence.
353 if s.count('"') % 2:
352 if s.count('"') % 2:
354 return '"'
353 return '"'
355 elif s.count("'") % 2:
354 elif s.count("'") % 2:
356 return "'"
355 return "'"
357 else:
356 else:
358 return False
357 return False
359
358


def protect_filename(s, protectables=PROTECTABLES):
    """Escape a string to protect certain characters."""
    if set(s) & set(protectables):
        if sys.platform == "win32":
            return '"' + s + '"'
        else:
            return "".join(("\\" + c if c in protectables else c) for c in s)
    else:
        return s


def expand_user(path:str) -> Tuple[str, bool, str]:
    """Expand ``~``-style usernames in strings.

    This is similar to :func:`os.path.expanduser`, but it computes and returns
    extra information that will be useful if the input was being used in
    computing completions, and you wish to return the completions with the
    original '~' instead of its expanded value.

    Parameters
    ----------
    path : str
        String to be expanded. If no ~ is present, the output is the same as the
        input.

    Returns
    -------
    newpath : str
        Result of ~ expansion in the input path.
    tilde_expand : bool
        Whether any expansion was performed or not.
    tilde_val : str
        The value that ~ was replaced with.
    """
    # Default values
    tilde_expand = False
    tilde_val = ''
    newpath = path

    if path.startswith('~'):
        tilde_expand = True
        rest = len(path)-1
        newpath = os.path.expanduser(path)
        if rest:
            tilde_val = newpath[:-rest]
        else:
            tilde_val = newpath

    return newpath, tilde_expand, tilde_val
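# For example, assuming the current user's home directory is /home/jo:
#   expand_user('~/rep')    -> ('/home/jo/rep', True, '/home/jo')
#   expand_user('setup.py') -> ('setup.py', False, '')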


def compress_user(path:str, tilde_expand:bool, tilde_val:str) -> str:
    """Does the opposite of expand_user, with its outputs.
    """
    if tilde_expand:
        return path.replace(tilde_val, '~')
    else:
        return path


def completions_sorting_key(word):
    """key for sorting completions

    This does several things:

    - Demote any completions starting with underscores to the end
    - Insert any %magic and %%cellmagic completions in the alphabetical order
      by their name
    """
    prio1, prio2 = 0, 0

    if word.startswith('__'):
        prio1 = 2
    elif word.startswith('_'):
        prio1 = 1

    if word.endswith('='):
        prio1 = -1

    if word.startswith('%%'):
        # If there's another % in there, this is something else, so leave it alone
        if not "%" in word[2:]:
            word = word[2:]
            prio2 = 2
    elif word.startswith('%'):
        if not "%" in word[1:]:
            word = word[1:]
            prio2 = 1

    return prio1, word, prio2
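# Example key values produced by completions_sorting_key:
#   'foo'    -> (0, 'foo', 0)    # regular names sort first, alphabetically
#   '_foo'   -> (1, '_foo', 0)   # single underscore demoted
#   '__foo'  -> (2, '__foo', 0)  # dunder names demoted further
#   '%%time' -> (0, 'time', 2)   # cell magic sorted by its bare name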
451
450
452
451
453 class _FakeJediCompletion:
452 class _FakeJediCompletion:
454 """
453 """
455 This is a workaround to communicate to the UI that Jedi has crashed and to
454 This is a workaround to communicate to the UI that Jedi has crashed and to
456 report a bug. Will be used only id :any:`IPCompleter.debug` is set to true.
455 report a bug. Will be used only id :any:`IPCompleter.debug` is set to true.
457
456
458 Added in IPython 6.0 so should likely be removed for 7.0
457 Added in IPython 6.0 so should likely be removed for 7.0
459
458
460 """
459 """
461
460
462 def __init__(self, name):
461 def __init__(self, name):
463
462
464 self.name = name
463 self.name = name
465 self.complete = name
464 self.complete = name
466 self.type = 'crashed'
465 self.type = 'crashed'
467 self.name_with_symbols = name
466 self.name_with_symbols = name
468 self.signature = ""
467 self.signature = ""
469 self._origin = "fake"
468 self._origin = "fake"
470 self.text = "crashed"
469 self.text = "crashed"
471
470
472 def __repr__(self):
471 def __repr__(self):
473 return '<Fake completion object jedi has crashed>'
472 return '<Fake completion object jedi has crashed>'
474
473
475
474
476 _JediCompletionLike = Union["jedi.api.Completion", _FakeJediCompletion]
475 _JediCompletionLike = Union["jedi.api.Completion", _FakeJediCompletion]
477
476
478
477
479 class Completion:
478 class Completion:
480 """
479 """
481 Completion object used and returned by IPython completers.
480 Completion object used and returned by IPython completers.
482
481
483 .. warning::
482 .. warning::
484
483
485 Unstable
484 Unstable
486
485
487 This function is unstable, API may change without warning.
486 This function is unstable, API may change without warning.
488 It will also raise unless use in proper context manager.
487 It will also raise unless use in proper context manager.
489
488
490 This act as a middle ground :any:`Completion` object between the
489 This act as a middle ground :any:`Completion` object between the
491 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
490 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
492 object. While Jedi need a lot of information about evaluator and how the
491 object. While Jedi need a lot of information about evaluator and how the
493 code should be ran/inspected, PromptToolkit (and other frontend) mostly
492 code should be ran/inspected, PromptToolkit (and other frontend) mostly
494 need user facing information.
493 need user facing information.
495
494
496 - Which range should be replaced replaced by what.
495 - Which range should be replaced replaced by what.
497 - Some metadata (like completion type), or meta information to displayed to
496 - Some metadata (like completion type), or meta information to displayed to
498 the use user.
497 the use user.
499
498
500 For debugging purpose we can also store the origin of the completion (``jedi``,
499 For debugging purpose we can also store the origin of the completion (``jedi``,
501 ``IPython.python_matches``, ``IPython.magics_matches``...).
500 ``IPython.python_matches``, ``IPython.magics_matches``...).
502 """
501 """
503
502
504 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
503 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
505
504
506 def __init__(
505 def __init__(
507 self,
506 self,
508 start: int,
507 start: int,
509 end: int,
508 end: int,
510 text: str,
509 text: str,
511 *,
510 *,
512 type: Optional[str] = None,
511 type: Optional[str] = None,
513 _origin="",
512 _origin="",
514 signature="",
513 signature="",
515 ) -> None:
514 ) -> None:
516 warnings.warn(
515 warnings.warn(
517 "``Completion`` is a provisional API (as of IPython 6.0). "
516 "``Completion`` is a provisional API (as of IPython 6.0). "
518 "It may change without warnings. "
517 "It may change without warnings. "
519 "Use in corresponding context manager.",
518 "Use in corresponding context manager.",
520 category=ProvisionalCompleterWarning,
519 category=ProvisionalCompleterWarning,
521 stacklevel=2,
520 stacklevel=2,
522 )
521 )
523
522
524 self.start = start
523 self.start = start
525 self.end = end
524 self.end = end
526 self.text = text
525 self.text = text
527 self.type = type
526 self.type = type
528 self.signature = signature
527 self.signature = signature
529 self._origin = _origin
528 self._origin = _origin
530
529
531 def __repr__(self):
530 def __repr__(self):
532 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
531 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
533 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
532 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
534
533
535 def __eq__(self, other) -> bool:
534 def __eq__(self, other) -> bool:
536 """
535 """
537 Equality and hash do not hash the type (as some completer may not be
536 Equality and hash do not hash the type (as some completer may not be
538 able to infer the type), but are use to (partially) de-duplicate
537 able to infer the type), but are use to (partially) de-duplicate
539 completion.
538 completion.
540
539
541 Completely de-duplicating completion is a bit tricker that just
540 Completely de-duplicating completion is a bit tricker that just
542 comparing as it depends on surrounding text, which Completions are not
541 comparing as it depends on surrounding text, which Completions are not
543 aware of.
542 aware of.
544 """
543 """
545 return self.start == other.start and \
544 return self.start == other.start and \
546 self.end == other.end and \
545 self.end == other.end and \
547 self.text == other.text
546 self.text == other.text
548
547
549 def __hash__(self):
548 def __hash__(self):
550 return hash((self.start, self.end, self.text))
549 return hash((self.start, self.end, self.text))
551
550
552
551
553 class SimpleCompletion:
552 class SimpleCompletion:
554 """Completion item to be included in the dictionary returned by new-style Matcher (API v2).
553 """Completion item to be included in the dictionary returned by new-style Matcher (API v2).
555
554
556 .. warning::
555 .. warning::
557
556
558 Provisional
557 Provisional
559
558
560 This class is used to describe the currently supported attributes of
559 This class is used to describe the currently supported attributes of
561 simple completion items, and any additional implementation details
560 simple completion items, and any additional implementation details
562 should not be relied on. Additional attributes may be included in
561 should not be relied on. Additional attributes may be included in
563 future versions, and meaning of text disambiguated from the current
562 future versions, and meaning of text disambiguated from the current
564 dual meaning of "text to insert" and "text to used as a label".
563 dual meaning of "text to insert" and "text to used as a label".
565 """
564 """
566
565
567 __slots__ = ["text", "type"]
566 __slots__ = ["text", "type"]
568
567
569 def __init__(self, text: str, *, type: Optional[str] = None):
568 def __init__(self, text: str, *, type: Optional[str] = None):
570 self.text = text
569 self.text = text
571 self.type = type
570 self.type = type
572
571
573 def __repr__(self):
572 def __repr__(self):
574 return f"<SimpleCompletion text={self.text!r} type={self.type!r}>"
573 return f"<SimpleCompletion text={self.text!r} type={self.type!r}>"
575
574
576
575
577 class _MatcherResultBase(TypedDict):
576 class _MatcherResultBase(TypedDict):
578 """Definition of dictionary to be returned by new-style Matcher (API v2)."""
577 """Definition of dictionary to be returned by new-style Matcher (API v2)."""
579
578
580 #: Suffix of the provided ``CompletionContext.token``, if not given defaults to full token.
579 #: Suffix of the provided ``CompletionContext.token``, if not given defaults to full token.
581 matched_fragment: NotRequired[str]
580 matched_fragment: NotRequired[str]
582
581
583 #: Whether to suppress results from all other matchers (True), some
582 #: Whether to suppress results from all other matchers (True), some
584 #: matchers (set of identifiers) or none (False); default is False.
583 #: matchers (set of identifiers) or none (False); default is False.
585 suppress: NotRequired[Union[bool, Set[str]]]
584 suppress: NotRequired[Union[bool, Set[str]]]
586
585
587 #: Identifiers of matchers which should NOT be suppressed when this matcher
586 #: Identifiers of matchers which should NOT be suppressed when this matcher
588 #: requests to suppress all other matchers; defaults to an empty set.
587 #: requests to suppress all other matchers; defaults to an empty set.
589 do_not_suppress: NotRequired[Set[str]]
588 do_not_suppress: NotRequired[Set[str]]
590
589
591 #: Are completions already ordered and should be left as-is? default is False.
590 #: Are completions already ordered and should be left as-is? default is False.
592 ordered: NotRequired[bool]
591 ordered: NotRequired[bool]
593
592
594
593
595 @sphinx_options(show_inherited_members=True, exclude_inherited_from=["dict"])
594 @sphinx_options(show_inherited_members=True, exclude_inherited_from=["dict"])
596 class SimpleMatcherResult(_MatcherResultBase, TypedDict):
595 class SimpleMatcherResult(_MatcherResultBase, TypedDict):
597 """Result of new-style completion matcher."""
596 """Result of new-style completion matcher."""
598
597
599 # note: TypedDict is added again to the inheritance chain
598 # note: TypedDict is added again to the inheritance chain
600 # in order to get __orig_bases__ for documentation
599 # in order to get __orig_bases__ for documentation
601
600
602 #: List of candidate completions
601 #: List of candidate completions
603 completions: Sequence[SimpleCompletion] | Iterator[SimpleCompletion]
602 completions: Sequence[SimpleCompletion] | Iterator[SimpleCompletion]
604
603
605
604
606 class _JediMatcherResult(_MatcherResultBase):
605 class _JediMatcherResult(_MatcherResultBase):
607 """Matching result returned by Jedi (will be processed differently)"""
606 """Matching result returned by Jedi (will be processed differently)"""
608
607
609 #: list of candidate completions
608 #: list of candidate completions
610 completions: Iterator[_JediCompletionLike]
609 completions: Iterator[_JediCompletionLike]
611
610
612
611
613 AnyMatcherCompletion = Union[_JediCompletionLike, SimpleCompletion]
612 AnyMatcherCompletion = Union[_JediCompletionLike, SimpleCompletion]
614 AnyCompletion = TypeVar("AnyCompletion", AnyMatcherCompletion, Completion)
613 AnyCompletion = TypeVar("AnyCompletion", AnyMatcherCompletion, Completion)
615
614
616
615
617 @dataclass
616 @dataclass
618 class CompletionContext:
617 class CompletionContext:
619 """Completion context provided as an argument to matchers in the Matcher API v2."""
618 """Completion context provided as an argument to matchers in the Matcher API v2."""
620
619
621 # rationale: many legacy matchers relied on completer state (`self.text_until_cursor`)
620 # rationale: many legacy matchers relied on completer state (`self.text_until_cursor`)
622 # which was not explicitly visible as an argument of the matcher, making any refactor
621 # which was not explicitly visible as an argument of the matcher, making any refactor
623 # prone to errors; by explicitly passing `cursor_position` we can decouple the matchers
622 # prone to errors; by explicitly passing `cursor_position` we can decouple the matchers
624 # from the completer, and make substituting them in sub-classes easier.
623 # from the completer, and make substituting them in sub-classes easier.
625
624
626 #: Relevant fragment of code directly preceding the cursor.
625 #: Relevant fragment of code directly preceding the cursor.
627 #: The extraction of token is implemented via splitter heuristic
626 #: The extraction of token is implemented via splitter heuristic
628 #: (following readline behaviour for legacy reasons), which is user configurable
627 #: (following readline behaviour for legacy reasons), which is user configurable
629 #: (by switching the greedy mode).
628 #: (by switching the greedy mode).
630 token: str
629 token: str
631
630
632 #: The full available content of the editor or buffer
631 #: The full available content of the editor or buffer
633 full_text: str
632 full_text: str
634
633
635 #: Cursor position in the line (the same for ``full_text`` and ``text``).
634 #: Cursor position in the line (the same for ``full_text`` and ``text``).
636 cursor_position: int
635 cursor_position: int
637
636
638 #: Cursor line in ``full_text``.
637 #: Cursor line in ``full_text``.
639 cursor_line: int
638 cursor_line: int
640
639
641 #: The maximum number of completions that will be used downstream.
640 #: The maximum number of completions that will be used downstream.
642 #: Matchers can use this information to abort early.
641 #: Matchers can use this information to abort early.
643 #: The built-in Jedi matcher is currently excepted from this limit.
642 #: The built-in Jedi matcher is currently excepted from this limit.
644 # If not given, return all possible completions.
643 # If not given, return all possible completions.
645 limit: Optional[int]
644 limit: Optional[int]
646
645
647 @cached_property
646 @cached_property
648 def text_until_cursor(self) -> str:
647 def text_until_cursor(self) -> str:
649 return self.line_with_cursor[: self.cursor_position]
648 return self.line_with_cursor[: self.cursor_position]
650
649
651 @cached_property
650 @cached_property
652 def line_with_cursor(self) -> str:
651 def line_with_cursor(self) -> str:
653 return self.full_text.split("\n")[self.cursor_line]
652 return self.full_text.split("\n")[self.cursor_line]
654
653
655
654
656 #: Matcher results for API v2.
655 #: Matcher results for API v2.
657 MatcherResult = Union[SimpleMatcherResult, _JediMatcherResult]
656 MatcherResult = Union[SimpleMatcherResult, _JediMatcherResult]
658
657
659
658
660 class _MatcherAPIv1Base(Protocol):
659 class _MatcherAPIv1Base(Protocol):
661 def __call__(self, text: str) -> List[str]:
660 def __call__(self, text: str) -> List[str]:
662 """Call signature."""
661 """Call signature."""
663 ...
662 ...
664
663
665 #: Used to construct the default matcher identifier
664 #: Used to construct the default matcher identifier
666 __qualname__: str
665 __qualname__: str
667
666
668
667
669 class _MatcherAPIv1Total(_MatcherAPIv1Base, Protocol):
668 class _MatcherAPIv1Total(_MatcherAPIv1Base, Protocol):
670 #: API version
669 #: API version
671 matcher_api_version: Optional[Literal[1]]
670 matcher_api_version: Optional[Literal[1]]
672
671
673 def __call__(self, text: str) -> List[str]:
672 def __call__(self, text: str) -> List[str]:
674 """Call signature."""
673 """Call signature."""
675 ...
674 ...
676
675
677
676
678 #: Protocol describing Matcher API v1.
677 #: Protocol describing Matcher API v1.
679 MatcherAPIv1: TypeAlias = Union[_MatcherAPIv1Base, _MatcherAPIv1Total]
678 MatcherAPIv1: TypeAlias = Union[_MatcherAPIv1Base, _MatcherAPIv1Total]
680
679
681
680
682 class MatcherAPIv2(Protocol):
681 class MatcherAPIv2(Protocol):
683 """Protocol describing Matcher API v2."""
682 """Protocol describing Matcher API v2."""
684
683
685 #: API version
684 #: API version
686 matcher_api_version: Literal[2] = 2
685 matcher_api_version: Literal[2] = 2
687
686
688 def __call__(self, context: CompletionContext) -> MatcherResult:
687 def __call__(self, context: CompletionContext) -> MatcherResult:
689 """Call signature."""
688 """Call signature."""
690 ...
689 ...
691
690
692 #: Used to construct the default matcher identifier
691 #: Used to construct the default matcher identifier
693 __qualname__: str
692 __qualname__: str
694
693
695
694
696 Matcher: TypeAlias = Union[MatcherAPIv1, MatcherAPIv2]
695 Matcher: TypeAlias = Union[MatcherAPIv1, MatcherAPIv2]
697
696
698
697
699 def _is_matcher_v1(matcher: Matcher) -> TypeGuard[MatcherAPIv1]:
698 def _is_matcher_v1(matcher: Matcher) -> TypeGuard[MatcherAPIv1]:
700 api_version = _get_matcher_api_version(matcher)
699 api_version = _get_matcher_api_version(matcher)
701 return api_version == 1
700 return api_version == 1
702
701
703
702
704 def _is_matcher_v2(matcher: Matcher) -> TypeGuard[MatcherAPIv2]:
703 def _is_matcher_v2(matcher: Matcher) -> TypeGuard[MatcherAPIv2]:
705 api_version = _get_matcher_api_version(matcher)
704 api_version = _get_matcher_api_version(matcher)
706 return api_version == 2
705 return api_version == 2
707
706
708
707
709 def _is_sizable(value: Any) -> TypeGuard[Sized]:
708 def _is_sizable(value: Any) -> TypeGuard[Sized]:
710 """Determines whether objects is sizable"""
709 """Determines whether objects is sizable"""
711 return hasattr(value, "__len__")
710 return hasattr(value, "__len__")
712
711
713
712
714 def _is_iterator(value: Any) -> TypeGuard[Iterator]:
713 def _is_iterator(value: Any) -> TypeGuard[Iterator]:
715 """Determines whether objects is sizable"""
714 """Determines whether objects is sizable"""
716 return hasattr(value, "__next__")
715 return hasattr(value, "__next__")
717
716
718
717
719 def has_any_completions(result: MatcherResult) -> bool:
718 def has_any_completions(result: MatcherResult) -> bool:
720 """Check if any result includes any completions."""
719 """Check if any result includes any completions."""
721 completions = result["completions"]
720 completions = result["completions"]
722 if _is_sizable(completions):
721 if _is_sizable(completions):
723 return len(completions) != 0
722 return len(completions) != 0
724 if _is_iterator(completions):
723 if _is_iterator(completions):
725 try:
724 try:
726 old_iterator = completions
725 old_iterator = completions
727 first = next(old_iterator)
726 first = next(old_iterator)
728 result["completions"] = cast(
727 result["completions"] = cast(
729 Iterator[SimpleCompletion],
728 Iterator[SimpleCompletion],
730 itertools.chain([first], old_iterator),
729 itertools.chain([first], old_iterator),
731 )
730 )
732 return True
731 return True
733 except StopIteration:
732 except StopIteration:
734 return False
733 return False
735 raise ValueError(
734 raise ValueError(
736 "Completions returned by matcher need to be an Iterator or a Sizable"
735 "Completions returned by matcher need to be an Iterator or a Sizable"
737 )
736 )
738
737
739
738
740 def completion_matcher(
739 def completion_matcher(
741 *,
740 *,
742 priority: Optional[float] = None,
741 priority: Optional[float] = None,
743 identifier: Optional[str] = None,
742 identifier: Optional[str] = None,
744 api_version: int = 1,
743 api_version: int = 1,
745 ):
744 ):
746 """Adds attributes describing the matcher.
745 """Adds attributes describing the matcher.
747
746
748 Parameters
747 Parameters
749 ----------
748 ----------
750 priority : Optional[float]
749 priority : Optional[float]
751 The priority of the matcher, determines the order of execution of matchers.
750 The priority of the matcher, determines the order of execution of matchers.
752 Higher priority means that the matcher will be executed first. Defaults to 0.
751 Higher priority means that the matcher will be executed first. Defaults to 0.
753 identifier : Optional[str]
752 identifier : Optional[str]
754 identifier of the matcher allowing users to modify the behaviour via traitlets,
753 identifier of the matcher allowing users to modify the behaviour via traitlets,
755 and also used to for debugging (will be passed as ``origin`` with the completions).
754 and also used to for debugging (will be passed as ``origin`` with the completions).
756
755
757 Defaults to matcher function's ``__qualname__`` (for example,
756 Defaults to matcher function's ``__qualname__`` (for example,
758 ``IPCompleter.file_matcher`` for the built-in matched defined
757 ``IPCompleter.file_matcher`` for the built-in matched defined
759 as a ``file_matcher`` method of the ``IPCompleter`` class).
758 as a ``file_matcher`` method of the ``IPCompleter`` class).
760 api_version : int, optional
759 api_version : int, optional
761 version of the Matcher API used by this matcher.
760 version of the Matcher API used by this matcher.
762 Currently supported values are 1 and 2.
761 Currently supported values are 1 and 2.
763 Defaults to 1.
762 Defaults to 1.
764 """
763 """
765
764
766 def wrapper(func: Matcher):
765 def wrapper(func: Matcher):
767 func.matcher_priority = priority or 0 # type: ignore
766 func.matcher_priority = priority or 0 # type: ignore
768 func.matcher_identifier = identifier or func.__qualname__ # type: ignore
767 func.matcher_identifier = identifier or func.__qualname__ # type: ignore
769 func.matcher_api_version = api_version # type: ignore
768 func.matcher_api_version = api_version # type: ignore
770 if TYPE_CHECKING:
769 if TYPE_CHECKING:
771 if api_version == 1:
770 if api_version == 1:
772 func = cast(MatcherAPIv1, func)
771 func = cast(MatcherAPIv1, func)
773 elif api_version == 2:
772 elif api_version == 2:
774 func = cast(MatcherAPIv2, func)
773 func = cast(MatcherAPIv2, func)
775 return func
774 return func
776
775
777 return wrapper
776 return wrapper
778
777
779
778
780 def _get_matcher_priority(matcher: Matcher):
779 def _get_matcher_priority(matcher: Matcher):
781 return getattr(matcher, "matcher_priority", 0)
780 return getattr(matcher, "matcher_priority", 0)
782
781
783
782
784 def _get_matcher_id(matcher: Matcher):
783 def _get_matcher_id(matcher: Matcher):
785 return getattr(matcher, "matcher_identifier", matcher.__qualname__)
784 return getattr(matcher, "matcher_identifier", matcher.__qualname__)
786
785
787
786
788 def _get_matcher_api_version(matcher):
787 def _get_matcher_api_version(matcher):
789 return getattr(matcher, "matcher_api_version", 1)
788 return getattr(matcher, "matcher_api_version", 1)
790
789
791
790
792 context_matcher = partial(completion_matcher, api_version=2)
791 context_matcher = partial(completion_matcher, api_version=2)
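
# Illustrative sketch: a minimal custom matcher registered through the v2 API
# declared above. ``CompletionContext``, ``SimpleCompletion`` and
# ``SimpleMatcherResult`` are assumed to be the types defined earlier in this
# module; in practice such a matcher would be appended to
# ``ip.Completer.custom_matchers``.
@context_matcher()
def _example_snowman_matcher(context: CompletionContext) -> SimpleMatcherResult:
    """Offer a single fixed completion when the current token ends in ':snow'."""
    if not context.token.endswith(":snow"):
        return {"completions": [], "suppress": False}
    return {
        "completions": [SimpleCompletion(text="β˜ƒ", type="unicode")],
        "suppress": False,
    }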
793
792
794
793
795 _IC = Iterable[Completion]
794 _IC = Iterable[Completion]
796
795
797
796
798 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
797 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
799 """
798 """
800 Deduplicate a set of completions.
799 Deduplicate a set of completions.
801
800
802 .. warning::
801 .. warning::
803
802
804 Unstable
803 Unstable
805
804
806 This function is unstable, API may change without warning.
805 This function is unstable, API may change without warning.
807
806
808 Parameters
807 Parameters
809 ----------
808 ----------
810 text : str
809 text : str
811 text that should be completed.
810 text that should be completed.
812 completions : Iterator[Completion]
811 completions : Iterator[Completion]
813 iterator over the completions to deduplicate
812 iterator over the completions to deduplicate
814
813
815 Yields
814 Yields
816 ------
815 ------
817 `Completions` objects
816 `Completions` objects
818 Completions coming from multiple sources may be different but end up having
817 Completions coming from multiple sources may be different but end up having
819 the same effect when applied to ``text``. If this is the case, this will
818 the same effect when applied to ``text``. If this is the case, this will
820 consider completions as equal and only emit the first encountered.
819 consider completions as equal and only emit the first encountered.
821 Not folded into `completions()` yet, for debugging purposes and to detect when
820 Not folded into `completions()` yet, for debugging purposes and to detect when
822 the IPython completer returns things that Jedi does not, but it should be folded in
821 the IPython completer returns things that Jedi does not, but it should be folded in
823 at some point.
822 at some point.
824 """
823 """
825 completions = list(completions)
824 completions = list(completions)
826 if not completions:
825 if not completions:
827 return
826 return
828
827
829 new_start = min(c.start for c in completions)
828 new_start = min(c.start for c in completions)
830 new_end = max(c.end for c in completions)
829 new_end = max(c.end for c in completions)
831
830
832 seen = set()
831 seen = set()
833 for c in completions:
832 for c in completions:
834 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
833 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
835 if new_text not in seen:
834 if new_text not in seen:
836 yield c
835 yield c
837 seen.add(new_text)
836 seen.add(new_text)
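
# Illustrative sketch: two completions that differ in their replacement ranges
# but produce the same final text are collapsed to the first one encountered.
# The ``Completion`` constructor is called the same way as in
# ``rectify_completions`` below; treat the exact keyword set as an assumption.
def _example_deduplicate():
    text = "foo.ba"
    cs = [
        Completion(4, 6, "bar", type=None, _origin="jedi", signature=""),
        Completion(0, 6, "foo.bar", type=None, _origin="IPCompleter.python_matcher", signature=""),
    ]
    # both candidates turn "foo.ba" into "foo.bar", so only the first is kept
    return list(_deduplicate_completions(text, cs))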
838
837
839
838
840 def rectify_completions(text: str, completions: _IC, *, _debug: bool = False) -> _IC:
839 def rectify_completions(text: str, completions: _IC, *, _debug: bool = False) -> _IC:
841 """
840 """
842 Rectify a set of completions to all have the same ``start`` and ``end``
841 Rectify a set of completions to all have the same ``start`` and ``end``
843
842
844 .. warning::
843 .. warning::
845
844
846 Unstable
845 Unstable
847
846
848 This function is unstable, API may change without warning.
847 This function is unstable, API may change without warning.
849 It will also raise unless used in the proper context manager.
848 It will also raise unless used in the proper context manager.
850
849
851 Parameters
850 Parameters
852 ----------
851 ----------
853 text : str
852 text : str
854 text that should be completed.
853 text that should be completed.
855 completions : Iterator[Completion]
854 completions : Iterator[Completion]
856 iterator over the completions to rectify
855 iterator over the completions to rectify
857 _debug : bool
856 _debug : bool
858 Log failed completion
857 Log failed completion
859
858
860 Notes
859 Notes
861 -----
860 -----
862 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
861 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
863 the Jupyter Protocol requires them to do so. This will readjust
862 the Jupyter Protocol requires them to do so. This will readjust
864 the completion to have the same ``start`` and ``end`` by padding both
863 the completion to have the same ``start`` and ``end`` by padding both
865 extremities with surrounding text.
864 extremities with surrounding text.
866
865
867 During stabilisation this should support a ``_debug`` option to log which
866 During stabilisation this should support a ``_debug`` option to log which
868 completions are returned by the IPython completer and not found in Jedi, in
867 completions are returned by the IPython completer and not found in Jedi, in
869 order to make upstream bug reports.
868 order to make upstream bug reports.
870 """
869 """
871 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
870 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
872 "It may change without warnings. "
871 "It may change without warnings. "
873 "Use in corresponding context manager.",
872 "Use in corresponding context manager.",
874 category=ProvisionalCompleterWarning, stacklevel=2)
873 category=ProvisionalCompleterWarning, stacklevel=2)
875
874
876 completions = list(completions)
875 completions = list(completions)
877 if not completions:
876 if not completions:
878 return
877 return
879 starts = (c.start for c in completions)
878 starts = (c.start for c in completions)
880 ends = (c.end for c in completions)
879 ends = (c.end for c in completions)
881
880
882 new_start = min(starts)
881 new_start = min(starts)
883 new_end = max(ends)
882 new_end = max(ends)
884
883
885 seen_jedi = set()
884 seen_jedi = set()
886 seen_python_matches = set()
885 seen_python_matches = set()
887 for c in completions:
886 for c in completions:
888 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
887 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
889 if c._origin == 'jedi':
888 if c._origin == 'jedi':
890 seen_jedi.add(new_text)
889 seen_jedi.add(new_text)
891 elif c._origin == "IPCompleter.python_matcher":
890 elif c._origin == "IPCompleter.python_matcher":
892 seen_python_matches.add(new_text)
891 seen_python_matches.add(new_text)
893 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
892 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
894 diff = seen_python_matches.difference(seen_jedi)
893 diff = seen_python_matches.difference(seen_jedi)
895 if diff and _debug:
894 if diff and _debug:
896 print('IPython.python matches have extras:', diff)
895 print('IPython.python matches have extras:', diff)
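
# Illustrative sketch: completions with different replacement ranges are padded
# with surrounding text so that they all share the same ``start``/``end``.
# ``provisionalcompleter`` (defined earlier in this module) is used to
# acknowledge the provisional API and silence the warning emitted above.
def _example_rectify():
    text = "foo.ba"
    cs = [
        Completion(4, 6, "bar", type=None, _origin="jedi", signature=""),
        Completion(5, 6, "ar", type=None, _origin="jedi", signature=""),
    ]
    with provisionalcompleter():
        out = list(rectify_completions(text, cs))
    # -> [(4, 6, 'bar'), (4, 6, 'bar')]
    return [(c.start, c.end, c.text) for c in out]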
897
896
898
897
899 if sys.platform == 'win32':
898 if sys.platform == 'win32':
900 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
899 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
901 else:
900 else:
902 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
901 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
903
902
904 GREEDY_DELIMS = ' =\r\n'
903 GREEDY_DELIMS = ' =\r\n'
905
904
906
905
907 class CompletionSplitter(object):
906 class CompletionSplitter(object):
908 """An object to split an input line in a manner similar to readline.
907 """An object to split an input line in a manner similar to readline.
909
908
910 By having our own implementation, we can expose readline-like completion in
909 By having our own implementation, we can expose readline-like completion in
911 a uniform manner to all frontends. This object only needs to be given the
910 a uniform manner to all frontends. This object only needs to be given the
912 line of text to be split and the cursor position on said line, and it
911 line of text to be split and the cursor position on said line, and it
913 returns the 'word' to be completed on at the cursor after splitting the
912 returns the 'word' to be completed on at the cursor after splitting the
914 entire line.
913 entire line.
915
914
916 What characters are used as splitting delimiters can be controlled by
915 What characters are used as splitting delimiters can be controlled by
917 setting the ``delims`` attribute (this is a property that internally
916 setting the ``delims`` attribute (this is a property that internally
918 automatically builds the necessary regular expression)"""
917 automatically builds the necessary regular expression)"""
919
918
920 # Private interface
919 # Private interface
921
920
922 # A string of delimiter characters. The default value makes sense for
921 # A string of delimiter characters. The default value makes sense for
923 # IPython's most typical usage patterns.
922 # IPython's most typical usage patterns.
924 _delims = DELIMS
923 _delims = DELIMS
925
924
926 # The expression (a normal string) to be compiled into a regular expression
925 # The expression (a normal string) to be compiled into a regular expression
927 # for actual splitting. We store it as an attribute mostly for ease of
926 # for actual splitting. We store it as an attribute mostly for ease of
928 # debugging, since this type of code can be so tricky to debug.
927 # debugging, since this type of code can be so tricky to debug.
929 _delim_expr = None
928 _delim_expr = None
930
929
931 # The regular expression that does the actual splitting
930 # The regular expression that does the actual splitting
932 _delim_re = None
931 _delim_re = None
933
932
934 def __init__(self, delims=None):
933 def __init__(self, delims=None):
935 delims = CompletionSplitter._delims if delims is None else delims
934 delims = CompletionSplitter._delims if delims is None else delims
936 self.delims = delims
935 self.delims = delims
937
936
938 @property
937 @property
939 def delims(self):
938 def delims(self):
940 """Return the string of delimiter characters."""
939 """Return the string of delimiter characters."""
941 return self._delims
940 return self._delims
942
941
943 @delims.setter
942 @delims.setter
944 def delims(self, delims):
943 def delims(self, delims):
945 """Set the delimiters for line splitting."""
944 """Set the delimiters for line splitting."""
946 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
945 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
947 self._delim_re = re.compile(expr)
946 self._delim_re = re.compile(expr)
948 self._delims = delims
947 self._delims = delims
949 self._delim_expr = expr
948 self._delim_expr = expr
950
949
951 def split_line(self, line, cursor_pos=None):
950 def split_line(self, line, cursor_pos=None):
952 """Split a line of text with a cursor at the given position.
951 """Split a line of text with a cursor at the given position.
953 """
952 """
954 l = line if cursor_pos is None else line[:cursor_pos]
953 l = line if cursor_pos is None else line[:cursor_pos]
955 return self._delim_re.split(l)[-1]
954 return self._delim_re.split(l)[-1]
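
# Illustrative sketch: only the trailing "word", as defined by the delimiter
# set above, is returned; an explicit ``cursor_pos`` truncates the line first.
def _example_split_line():
    sp = CompletionSplitter()
    assert sp.split_line("print(foo.ba") == "foo.ba"   # '(' is a delimiter, '.' is not
    assert sp.split_line("print(foo.ba", cursor_pos=6) == ""
    return sp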
956
955
957
956
958
957
959 class Completer(Configurable):
958 class Completer(Configurable):
960
959
961 greedy = Bool(
960 greedy = Bool(
962 False,
961 False,
963 help="""Activate greedy completion.
962 help="""Activate greedy completion.
964
963
965 .. deprecated:: 8.8
964 .. deprecated:: 8.8
966 Use :std:configtrait:`Completer.evaluation` and :std:configtrait:`Completer.auto_close_dict_keys` instead.
965 Use :std:configtrait:`Completer.evaluation` and :std:configtrait:`Completer.auto_close_dict_keys` instead.
967
966
968 When enabled in IPython 8.8 or newer, changes configuration as follows:
967 When enabled in IPython 8.8 or newer, changes configuration as follows:
969
968
970 - ``Completer.evaluation = 'unsafe'``
969 - ``Completer.evaluation = 'unsafe'``
971 - ``Completer.auto_close_dict_keys = True``
970 - ``Completer.auto_close_dict_keys = True``
972 """,
971 """,
973 ).tag(config=True)
972 ).tag(config=True)
974
973
975 evaluation = Enum(
974 evaluation = Enum(
976 ("forbidden", "minimal", "limited", "unsafe", "dangerous"),
975 ("forbidden", "minimal", "limited", "unsafe", "dangerous"),
977 default_value="limited",
976 default_value="limited",
978 help="""Policy for code evaluation under completion.
977 help="""Policy for code evaluation under completion.
979
978
980 Successive options enable more eager evaluation for better
979 Successive options enable more eager evaluation for better
981 completion suggestions, including for nested dictionaries, nested lists,
980 completion suggestions, including for nested dictionaries, nested lists,
982 or even results of function calls.
981 or even results of function calls.
983 Setting ``unsafe`` or higher can lead to evaluation of arbitrary user
982 Setting ``unsafe`` or higher can lead to evaluation of arbitrary user
984 code on :kbd:`Tab` with potentially unwanted or dangerous side effects.
983 code on :kbd:`Tab` with potentially unwanted or dangerous side effects.
985
984
986 Allowed values are:
985 Allowed values are:
987
986
988 - ``forbidden``: no evaluation of code is permitted,
987 - ``forbidden``: no evaluation of code is permitted,
989 - ``minimal``: evaluation of literals and access to built-in namespace;
988 - ``minimal``: evaluation of literals and access to built-in namespace;
990 no item/attribute evaluation, no access to locals/globals,
989 no item/attribute evaluation, no access to locals/globals,
991 no evaluation of any operations or comparisons.
990 no evaluation of any operations or comparisons.
992 - ``limited``: access to all namespaces, evaluation of hard-coded methods
991 - ``limited``: access to all namespaces, evaluation of hard-coded methods
993 (for example: :any:`dict.keys`, :any:`object.__getattr__`,
992 (for example: :any:`dict.keys`, :any:`object.__getattr__`,
994 :any:`object.__getitem__`) on allow-listed objects (for example:
993 :any:`object.__getitem__`) on allow-listed objects (for example:
995 :any:`dict`, :any:`list`, :any:`tuple`, ``pandas.Series``),
994 :any:`dict`, :any:`list`, :any:`tuple`, ``pandas.Series``),
996 - ``unsafe``: evaluation of all methods and function calls but not of
995 - ``unsafe``: evaluation of all methods and function calls but not of
997 syntax with side-effects like `del x`,
996 syntax with side-effects like `del x`,
998 - ``dangerous``: completely arbitrary evaluation.
997 - ``dangerous``: completely arbitrary evaluation.
999 """,
998 """,
1000 ).tag(config=True)
999 ).tag(config=True)
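
    # Illustrative configuration sketch (e.g. in ``ipython_config.py``); the
    # trait names are the ones declared above, the values are just one
    # reasonable choice, not a recommendation:
    #
    #     c = get_config()
    #     c.Completer.evaluation = "minimal"        # only literals and builtins
    #     c.Completer.auto_close_dict_keys = True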
1001
1000
1002 use_jedi = Bool(default_value=JEDI_INSTALLED,
1001 use_jedi = Bool(default_value=JEDI_INSTALLED,
1003 help="Experimental: Use Jedi to generate autocompletions. "
1002 help="Experimental: Use Jedi to generate autocompletions. "
1004 "Default to True if jedi is installed.").tag(config=True)
1003 "Default to True if jedi is installed.").tag(config=True)
1005
1004
1006 jedi_compute_type_timeout = Int(default_value=400,
1005 jedi_compute_type_timeout = Int(default_value=400,
1007 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
1006 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
1008 Set to 0 to stop computing types. A non-zero value lower than 100ms may hurt
1007 Set to 0 to stop computing types. A non-zero value lower than 100ms may hurt
1009 performance by preventing jedi from building its cache.
1008 performance by preventing jedi from building its cache.
1010 """).tag(config=True)
1009 """).tag(config=True)
1011
1010
1012 debug = Bool(default_value=False,
1011 debug = Bool(default_value=False,
1013 help='Enable debug for the Completer. Mostly print extra '
1012 help='Enable debug for the Completer. Mostly print extra '
1014 'information for experimental jedi integration.')\
1013 'information for experimental jedi integration.')\
1015 .tag(config=True)
1014 .tag(config=True)
1016
1015
1017 backslash_combining_completions = Bool(True,
1016 backslash_combining_completions = Bool(True,
1018 help="Enable unicode completions, e.g. \\alpha<tab> . "
1017 help="Enable unicode completions, e.g. \\alpha<tab> . "
1019 "Includes completion of latex commands, unicode names, and expanding "
1018 "Includes completion of latex commands, unicode names, and expanding "
1020 "unicode characters back to latex commands.").tag(config=True)
1019 "unicode characters back to latex commands.").tag(config=True)
1021
1020
1022 auto_close_dict_keys = Bool(
1021 auto_close_dict_keys = Bool(
1023 False,
1022 False,
1024 help="""
1023 help="""
1025 Enable auto-closing dictionary keys.
1024 Enable auto-closing dictionary keys.
1026
1025
1027 When enabled, string keys will be suffixed with a final quote
1026 When enabled, string keys will be suffixed with a final quote
1028 (matching the opening quote), tuple keys will also receive a
1027 (matching the opening quote), tuple keys will also receive a
1029 separating comma if needed, and keys which are final will
1028 separating comma if needed, and keys which are final will
1030 receive a closing bracket (``]``).
1029 receive a closing bracket (``]``).
1031 """,
1030 """,
1032 ).tag(config=True)
1031 ).tag(config=True)
1033
1032
1034 def __init__(self, namespace=None, global_namespace=None, **kwargs):
1033 def __init__(self, namespace=None, global_namespace=None, **kwargs):
1035 """Create a new completer for the command line.
1034 """Create a new completer for the command line.
1036
1035
1037 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
1036 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
1038
1037
1039 If unspecified, the default namespace where completions are performed
1038 If unspecified, the default namespace where completions are performed
1040 is __main__ (technically, __main__.__dict__). Namespaces should be
1039 is __main__ (technically, __main__.__dict__). Namespaces should be
1041 given as dictionaries.
1040 given as dictionaries.
1042
1041
1043 An optional second namespace can be given. This allows the completer
1042 An optional second namespace can be given. This allows the completer
1044 to handle cases where both the local and global scopes need to be
1043 to handle cases where both the local and global scopes need to be
1045 distinguished.
1044 distinguished.
1046 """
1045 """
1047
1046
1048 # Don't bind to namespace quite yet, but flag whether the user wants a
1047 # Don't bind to namespace quite yet, but flag whether the user wants a
1049 # specific namespace or to use __main__.__dict__. This will allow us
1048 # specific namespace or to use __main__.__dict__. This will allow us
1050 # to bind to __main__.__dict__ at completion time, not now.
1049 # to bind to __main__.__dict__ at completion time, not now.
1051 if namespace is None:
1050 if namespace is None:
1052 self.use_main_ns = True
1051 self.use_main_ns = True
1053 else:
1052 else:
1054 self.use_main_ns = False
1053 self.use_main_ns = False
1055 self.namespace = namespace
1054 self.namespace = namespace
1056
1055
1057 # The global namespace, if given, can be bound directly
1056 # The global namespace, if given, can be bound directly
1058 if global_namespace is None:
1057 if global_namespace is None:
1059 self.global_namespace = {}
1058 self.global_namespace = {}
1060 else:
1059 else:
1061 self.global_namespace = global_namespace
1060 self.global_namespace = global_namespace
1062
1061
1063 self.custom_matchers = []
1062 self.custom_matchers = []
1064
1063
1065 super(Completer, self).__init__(**kwargs)
1064 super(Completer, self).__init__(**kwargs)
1066
1065
1067 def complete(self, text, state):
1066 def complete(self, text, state):
1068 """Return the next possible completion for 'text'.
1067 """Return the next possible completion for 'text'.
1069
1068
1070 This is called successively with state == 0, 1, 2, ... until it
1069 This is called successively with state == 0, 1, 2, ... until it
1071 returns None. The completion should begin with 'text'.
1070 returns None. The completion should begin with 'text'.
1072
1071
1073 """
1072 """
1074 if self.use_main_ns:
1073 if self.use_main_ns:
1075 self.namespace = __main__.__dict__
1074 self.namespace = __main__.__dict__
1076
1075
1077 if state == 0:
1076 if state == 0:
1078 if "." in text:
1077 if "." in text:
1079 self.matches = self.attr_matches(text)
1078 self.matches = self.attr_matches(text)
1080 else:
1079 else:
1081 self.matches = self.global_matches(text)
1080 self.matches = self.global_matches(text)
1082 try:
1081 try:
1083 return self.matches[state]
1082 return self.matches[state]
1084 except IndexError:
1083 except IndexError:
1085 return None
1084 return None
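
    # Illustrative sketch of the readline-style protocol described above
    # (namespace contents are made up):
    #
    #     c = Completer(namespace={"alpha": 1, "alphabet": 2})
    #     c.complete("alph", 0)   # -> 'alpha'
    #     c.complete("alph", 1)   # -> 'alphabet'
    #     c.complete("alph", 2)   # -> None (no more matches)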
1086
1085
1087 def global_matches(self, text):
1086 def global_matches(self, text):
1088 """Compute matches when text is a simple name.
1087 """Compute matches when text is a simple name.
1089
1088
1090 Return a list of all keywords, built-in functions and names currently
1089 Return a list of all keywords, built-in functions and names currently
1091 defined in self.namespace or self.global_namespace that match.
1090 defined in self.namespace or self.global_namespace that match.
1092
1091
1093 """
1092 """
1094 matches = []
1093 matches = []
1095 match_append = matches.append
1094 match_append = matches.append
1096 n = len(text)
1095 n = len(text)
1097 for lst in [
1096 for lst in [
1098 keyword.kwlist,
1097 keyword.kwlist,
1099 builtin_mod.__dict__.keys(),
1098 builtin_mod.__dict__.keys(),
1100 list(self.namespace.keys()),
1099 list(self.namespace.keys()),
1101 list(self.global_namespace.keys()),
1100 list(self.global_namespace.keys()),
1102 ]:
1101 ]:
1103 for word in lst:
1102 for word in lst:
1104 if word[:n] == text and word != "__builtins__":
1103 if word[:n] == text and word != "__builtins__":
1105 match_append(word)
1104 match_append(word)
1106
1105
1107 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
1106 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
1108 for lst in [list(self.namespace.keys()), list(self.global_namespace.keys())]:
1107 for lst in [list(self.namespace.keys()), list(self.global_namespace.keys())]:
1109 shortened = {
1108 shortened = {
1110 "_".join([sub[0] for sub in word.split("_")]): word
1109 "_".join([sub[0] for sub in word.split("_")]): word
1111 for word in lst
1110 for word in lst
1112 if snake_case_re.match(word)
1111 if snake_case_re.match(word)
1113 }
1112 }
1114 for word in shortened.keys():
1113 for word in shortened.keys():
1115 if word[:n] == text and word != "__builtins__":
1114 if word[:n] == text and word != "__builtins__":
1116 match_append(shortened[word])
1115 match_append(shortened[word])
1117 return matches
1116 return matches
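
    # Illustrative sketch: besides plain prefix matches, the second loop above
    # lets an underscore-separated abbreviation expand to the full name,
    # assuming ``data_frame`` exists in the namespace:
    #
    #     Completer(namespace={"data_frame": 1}).global_matches("d_f")
    #     # -> ['data_frame']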
1118
1117
1119 def attr_matches(self, text):
1118 def attr_matches(self, text):
1120 """Compute matches when text contains a dot.
1119 """Compute matches when text contains a dot.
1121
1120
1122 Assuming the text is of the form NAME.NAME....[NAME], and is
1121 Assuming the text is of the form NAME.NAME....[NAME], and is
1123 evaluatable in self.namespace or self.global_namespace, it will be
1122 evaluatable in self.namespace or self.global_namespace, it will be
1124 evaluated and its attributes (as revealed by dir()) are used as
1123 evaluated and its attributes (as revealed by dir()) are used as
1125 possible completions. (For class instances, class members are
1124 possible completions. (For class instances, class members are
1126 also considered.)
1125 also considered.)
1127
1126
1128 WARNING: this can still invoke arbitrary C code, if an object
1127 WARNING: this can still invoke arbitrary C code, if an object
1129 with a __getattr__ hook is evaluated.
1128 with a __getattr__ hook is evaluated.
1130
1129
1131 """
1130 """
1132 return self._attr_matches(text)[0]
1131 return self._attr_matches(text)[0]
1133
1132
1134 def _attr_matches(self, text, include_prefix=True) -> Tuple[Sequence[str], str]:
1133 def _attr_matches(self, text, include_prefix=True) -> Tuple[Sequence[str], str]:
1135 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
1134 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
1136 if not m2:
1135 if not m2:
1137 return [], ""
1136 return [], ""
1138 expr, attr = m2.group(1, 2)
1137 expr, attr = m2.group(1, 2)
1139
1138
1140 obj = self._evaluate_expr(expr)
1139 obj = self._evaluate_expr(expr)
1141
1140
1142 if obj is not_found:
1141 if obj is not_found:
1143 return [], ""
1142 return [], ""
1144
1143
1145 if self.limit_to__all__ and hasattr(obj, '__all__'):
1144 if self.limit_to__all__ and hasattr(obj, '__all__'):
1146 words = get__all__entries(obj)
1145 words = get__all__entries(obj)
1147 else:
1146 else:
1148 words = dir2(obj)
1147 words = dir2(obj)
1149
1148
1150 try:
1149 try:
1151 words = generics.complete_object(obj, words)
1150 words = generics.complete_object(obj, words)
1152 except TryNext:
1151 except TryNext:
1153 pass
1152 pass
1154 except AssertionError:
1153 except AssertionError:
1155 raise
1154 raise
1156 except Exception:
1155 except Exception:
1157 # Silence errors from completion function
1156 # Silence errors from completion function
1158 pass
1157 pass
1159 # Build match list to return
1158 # Build match list to return
1160 n = len(attr)
1159 n = len(attr)
1161
1160
1162 # Note: ideally we would just return words here and the prefix
1161 # Note: ideally we would just return words here and the prefix
1163 # reconciliator would know that we intend to append to rather than
1162 # reconciliator would know that we intend to append to rather than
1164 # replace the input text; this requires refactoring to return range
1163 # replace the input text; this requires refactoring to return range
1165 # which ought to be replaced (as does jedi).
1164 # which ought to be replaced (as does jedi).
1166 if include_prefix:
1165 if include_prefix:
1167 tokens = _parse_tokens(expr)
1166 tokens = _parse_tokens(expr)
1168 rev_tokens = reversed(tokens)
1167 rev_tokens = reversed(tokens)
1169 skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
1168 skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
1170 name_turn = True
1169 name_turn = True
1171
1170
1172 parts = []
1171 parts = []
1173 for token in rev_tokens:
1172 for token in rev_tokens:
1174 if token.type in skip_over:
1173 if token.type in skip_over:
1175 continue
1174 continue
1176 if token.type == tokenize.NAME and name_turn:
1175 if token.type == tokenize.NAME and name_turn:
1177 parts.append(token.string)
1176 parts.append(token.string)
1178 name_turn = False
1177 name_turn = False
1179 elif (
1178 elif (
1180 token.type == tokenize.OP and token.string == "." and not name_turn
1179 token.type == tokenize.OP and token.string == "." and not name_turn
1181 ):
1180 ):
1182 parts.append(token.string)
1181 parts.append(token.string)
1183 name_turn = True
1182 name_turn = True
1184 else:
1183 else:
1185 # stop once we hit a token that is neither a name nor a dot
1184 # stop once we hit a token that is neither a name nor a dot
1186 break
1185 break
1187
1186
1188 prefix_after_space = "".join(reversed(parts))
1187 prefix_after_space = "".join(reversed(parts))
1189 else:
1188 else:
1190 prefix_after_space = ""
1189 prefix_after_space = ""
1191
1190
1192 return (
1191 return (
1193 ["%s.%s" % (prefix_after_space, w) for w in words if w[:n] == attr],
1192 ["%s.%s" % (prefix_after_space, w) for w in words if w[:n] == attr],
1194 "." + attr,
1193 "." + attr,
1195 )
1194 )
1196
1195
1197 def _evaluate_expr(self, expr):
1196 def _evaluate_expr(self, expr):
1198 obj = not_found
1197 obj = not_found
1199 done = False
1198 done = False
1200 while not done and expr:
1199 while not done and expr:
1201 try:
1200 try:
1202 obj = guarded_eval(
1201 obj = guarded_eval(
1203 expr,
1202 expr,
1204 EvaluationContext(
1203 EvaluationContext(
1205 globals=self.global_namespace,
1204 globals=self.global_namespace,
1206 locals=self.namespace,
1205 locals=self.namespace,
1207 evaluation=self.evaluation,
1206 evaluation=self.evaluation,
1208 ),
1207 ),
1209 )
1208 )
1210 done = True
1209 done = True
1211 except Exception as e:
1210 except Exception as e:
1212 if self.debug:
1211 if self.debug:
1213 print("Evaluation exception", e)
1212 print("Evaluation exception", e)
1214 # trim the expression to remove any invalid prefix
1213 # trim the expression to remove any invalid prefix
1215 # e.g. user starts `(d[`, so we get `expr = '(d'`,
1214 # e.g. user starts `(d[`, so we get `expr = '(d'`,
1216 # where parenthesis is not closed.
1215 # where parenthesis is not closed.
1217 # TODO: make this faster by reusing parts of the computation?
1216 # TODO: make this faster by reusing parts of the computation?
1218 expr = expr[1:]
1217 expr = expr[1:]
1219 return obj
1218 return obj
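
    # Illustrative sketch of the trimming loop above:
    #
    #     c = Completer(namespace={"d": {1: "x"}})
    #     c._evaluate_expr("(d")   # "(d" fails to evaluate, is trimmed to "d",
    #                              # which then evaluates to {1: 'x'}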
1220
1219
1221 def get__all__entries(obj):
1220 def get__all__entries(obj):
1222 """returns the strings in the __all__ attribute"""
1221 """returns the strings in the __all__ attribute"""
1223 try:
1222 try:
1224 words = getattr(obj, '__all__')
1223 words = getattr(obj, '__all__')
1225 except:
1224 except:
1226 return []
1225 return []
1227
1226
1228 return [w for w in words if isinstance(w, str)]
1227 return [w for w in words if isinstance(w, str)]
1229
1228
1230
1229
1231 class _DictKeyState(enum.Flag):
1230 class _DictKeyState(enum.Flag):
1232 """Represent state of the key match in context of other possible matches.
1231 """Represent state of the key match in context of other possible matches.
1233
1232
1234 - given `d1 = {'a': 1}` completion on `d1['<tab>` will yield `{'a': END_OF_ITEM}` as there is no tuple.
1233 - given `d1 = {'a': 1}` completion on `d1['<tab>` will yield `{'a': END_OF_ITEM}` as there is no tuple.
1235 - given `d2 = {('a', 'b'): 1}`: `d2['a', '<tab>` will yield `{'b': END_OF_TUPLE}` as there are no tuple members to add beyond `'b'`.
1234 - given `d2 = {('a', 'b'): 1}`: `d2['a', '<tab>` will yield `{'b': END_OF_TUPLE}` as there are no tuple members to add beyond `'b'`.
1236 - given `d3 = {('a', 'b'): 1}`: `d3['<tab>` will yield `{'a': IN_TUPLE}` as `'a'` can be added.
1235 - given `d3 = {('a', 'b'): 1}`: `d3['<tab>` will yield `{'a': IN_TUPLE}` as `'a'` can be added.
1237 - given `d4 = {'a': 1, ('a', 'b'): 2}`: `d4['<tab>` will yield `{'a': END_OF_ITEM & END_OF_TUPLE}`
1236 - given `d4 = {'a': 1, ('a', 'b'): 2}`: `d4['<tab>` will yield `{'a': END_OF_ITEM & END_OF_TUPLE}`
1238 """
1237 """
1239
1238
1240 BASELINE = 0
1239 BASELINE = 0
1241 END_OF_ITEM = enum.auto()
1240 END_OF_ITEM = enum.auto()
1242 END_OF_TUPLE = enum.auto()
1241 END_OF_TUPLE = enum.auto()
1243 IN_TUPLE = enum.auto()
1242 IN_TUPLE = enum.auto()
1244
1243
1245
1244
1246 def _parse_tokens(c):
1245 def _parse_tokens(c):
1247 """Parse tokens even if there is an error."""
1246 """Parse tokens even if there is an error."""
1248 tokens = []
1247 tokens = []
1249 token_generator = tokenize.generate_tokens(iter(c.splitlines()).__next__)
1248 token_generator = tokenize.generate_tokens(iter(c.splitlines()).__next__)
1250 while True:
1249 while True:
1251 try:
1250 try:
1252 tokens.append(next(token_generator))
1251 tokens.append(next(token_generator))
1253 except tokenize.TokenError:
1252 except tokenize.TokenError:
1254 return tokens
1253 return tokens
1255 except StopIteration:
1254 except StopIteration:
1256 return tokens
1255 return tokens
1257
1256
1258
1257
1259 def _match_number_in_dict_key_prefix(prefix: str) -> Union[str, None]:
1258 def _match_number_in_dict_key_prefix(prefix: str) -> Union[str, None]:
1260 """Match any valid Python numeric literal in a prefix of dictionary keys.
1259 """Match any valid Python numeric literal in a prefix of dictionary keys.
1261
1260
1262 References:
1261 References:
1263 - https://docs.python.org/3/reference/lexical_analysis.html#numeric-literals
1262 - https://docs.python.org/3/reference/lexical_analysis.html#numeric-literals
1264 - https://docs.python.org/3/library/tokenize.html
1263 - https://docs.python.org/3/library/tokenize.html
1265 """
1264 """
1266 if prefix[-1].isspace():
1265 if prefix[-1].isspace():
1267 # if user typed a space we do not have anything to complete
1266 # if user typed a space we do not have anything to complete
1268 # even if there was a valid number token before
1267 # even if there was a valid number token before
1269 return None
1268 return None
1270 tokens = _parse_tokens(prefix)
1269 tokens = _parse_tokens(prefix)
1271 rev_tokens = reversed(tokens)
1270 rev_tokens = reversed(tokens)
1272 skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
1271 skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
1273 number = None
1272 number = None
1274 for token in rev_tokens:
1273 for token in rev_tokens:
1275 if token.type in skip_over:
1274 if token.type in skip_over:
1276 continue
1275 continue
1277 if number is None:
1276 if number is None:
1278 if token.type == tokenize.NUMBER:
1277 if token.type == tokenize.NUMBER:
1279 number = token.string
1278 number = token.string
1280 continue
1279 continue
1281 else:
1280 else:
1282 # we did not match a number
1281 # we did not match a number
1283 return None
1282 return None
1284 if token.type == tokenize.OP:
1283 if token.type == tokenize.OP:
1285 if token.string == ",":
1284 if token.string == ",":
1286 break
1285 break
1287 if token.string in {"+", "-"}:
1286 if token.string in {"+", "-"}:
1288 number = token.string + number
1287 number = token.string + number
1289 else:
1288 else:
1290 return None
1289 return None
1291 return number
1290 return number
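
# Illustrative sketch of the helper above (module-private, so subject to change):
def _example_match_number():
    assert _match_number_in_dict_key_prefix("-5") == "-5"
    assert _match_number_in_dict_key_prefix("0x1f") == "0x1f"
    assert _match_number_in_dict_key_prefix("foo") is None   # not a number
    assert _match_number_in_dict_key_prefix("5 ") is None    # trailing space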
1292
1291
1293
1292
1294 _INT_FORMATS = {
1293 _INT_FORMATS = {
1295 "0b": bin,
1294 "0b": bin,
1296 "0o": oct,
1295 "0o": oct,
1297 "0x": hex,
1296 "0x": hex,
1298 }
1297 }
1299
1298
1300
1299
1301 def match_dict_keys(
1300 def match_dict_keys(
1302 keys: List[Union[str, bytes, Tuple[Union[str, bytes], ...]]],
1301 keys: List[Union[str, bytes, Tuple[Union[str, bytes], ...]]],
1303 prefix: str,
1302 prefix: str,
1304 delims: str,
1303 delims: str,
1305 extra_prefix: Optional[Tuple[Union[str, bytes], ...]] = None,
1304 extra_prefix: Optional[Tuple[Union[str, bytes], ...]] = None,
1306 ) -> Tuple[str, int, Dict[str, _DictKeyState]]:
1305 ) -> Tuple[str, int, Dict[str, _DictKeyState]]:
1307 """Used by dict_key_matches, matching the prefix to a list of keys
1306 """Used by dict_key_matches, matching the prefix to a list of keys
1308
1307
1309 Parameters
1308 Parameters
1310 ----------
1309 ----------
1311 keys
1310 keys
1312 list of keys in dictionary currently being completed.
1311 list of keys in dictionary currently being completed.
1313 prefix
1312 prefix
1314 Part of the text already typed by the user. E.g. `mydict[b'fo`
1313 Part of the text already typed by the user. E.g. `mydict[b'fo`
1315 delims
1314 delims
1316 String of delimiters to consider when finding the current key.
1315 String of delimiters to consider when finding the current key.
1317 extra_prefix : optional
1316 extra_prefix : optional
1318 Part of the text already typed in multi-key index cases. E.g. for
1317 Part of the text already typed in multi-key index cases. E.g. for
1319 `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.
1318 `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.
1320
1319
1321 Returns
1320 Returns
1322 -------
1321 -------
1323 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
1322 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
1324 ``quote`` being the quote that needs to be used to close the current string,
1323 ``quote`` being the quote that needs to be used to close the current string,
1325 ``token_start`` the position where the replacement should start occurring,
1324 ``token_start`` the position where the replacement should start occurring,
1326 ``matched`` a dictionary mapping each replacement/completion key to a value
1325 ``matched`` a dictionary mapping each replacement/completion key to a value
1327 indicating its match state.
1326 indicating its match state.
1328 """
1327 """
1329 prefix_tuple = extra_prefix if extra_prefix else ()
1328 prefix_tuple = extra_prefix if extra_prefix else ()
1330
1329
1331 prefix_tuple_size = sum(
1330 prefix_tuple_size = sum(
1332 [
1331 [
1333 # for pandas, do not count slices as taking space
1332 # for pandas, do not count slices as taking space
1334 not isinstance(k, slice)
1333 not isinstance(k, slice)
1335 for k in prefix_tuple
1334 for k in prefix_tuple
1336 ]
1335 ]
1337 )
1336 )
1338 text_serializable_types = (str, bytes, int, float, slice)
1337 text_serializable_types = (str, bytes, int, float, slice)
1339
1338
1340 def filter_prefix_tuple(key):
1339 def filter_prefix_tuple(key):
1341 # Reject too short keys
1340 # Reject too short keys
1342 if len(key) <= prefix_tuple_size:
1341 if len(key) <= prefix_tuple_size:
1343 return False
1342 return False
1344 # Reject keys which cannot be serialised to text
1343 # Reject keys which cannot be serialised to text
1345 for k in key:
1344 for k in key:
1346 if not isinstance(k, text_serializable_types):
1345 if not isinstance(k, text_serializable_types):
1347 return False
1346 return False
1348 # Reject keys that do not match the prefix
1347 # Reject keys that do not match the prefix
1349 for k, pt in zip(key, prefix_tuple):
1348 for k, pt in zip(key, prefix_tuple):
1350 if k != pt and not isinstance(pt, slice):
1349 if k != pt and not isinstance(pt, slice):
1351 return False
1350 return False
1352 # All checks passed!
1351 # All checks passed!
1353 return True
1352 return True
1354
1353
1355 filtered_key_is_final: Dict[Union[str, bytes, int, float], _DictKeyState] = (
1354 filtered_key_is_final: Dict[Union[str, bytes, int, float], _DictKeyState] = (
1356 defaultdict(lambda: _DictKeyState.BASELINE)
1355 defaultdict(lambda: _DictKeyState.BASELINE)
1357 )
1356 )
1358
1357
1359 for k in keys:
1358 for k in keys:
1360 # If at least one of the matches is not final, mark as undetermined.
1359 # If at least one of the matches is not final, mark as undetermined.
1361 # This can happen with `d = {111: 'b', (111, 222): 'a'}` where
1360 # This can happen with `d = {111: 'b', (111, 222): 'a'}` where
1362 # `111` appears final on first match but is not final on the second.
1361 # `111` appears final on first match but is not final on the second.
1363
1362
1364 if isinstance(k, tuple):
1363 if isinstance(k, tuple):
1365 if filter_prefix_tuple(k):
1364 if filter_prefix_tuple(k):
1366 key_fragment = k[prefix_tuple_size]
1365 key_fragment = k[prefix_tuple_size]
1367 filtered_key_is_final[key_fragment] |= (
1366 filtered_key_is_final[key_fragment] |= (
1368 _DictKeyState.END_OF_TUPLE
1367 _DictKeyState.END_OF_TUPLE
1369 if len(k) == prefix_tuple_size + 1
1368 if len(k) == prefix_tuple_size + 1
1370 else _DictKeyState.IN_TUPLE
1369 else _DictKeyState.IN_TUPLE
1371 )
1370 )
1372 elif prefix_tuple_size > 0:
1371 elif prefix_tuple_size > 0:
1373 # we are completing a tuple but this key is not a tuple,
1372 # we are completing a tuple but this key is not a tuple,
1374 # so we should ignore it
1373 # so we should ignore it
1375 pass
1374 pass
1376 else:
1375 else:
1377 if isinstance(k, text_serializable_types):
1376 if isinstance(k, text_serializable_types):
1378 filtered_key_is_final[k] |= _DictKeyState.END_OF_ITEM
1377 filtered_key_is_final[k] |= _DictKeyState.END_OF_ITEM
1379
1378
1380 filtered_keys = filtered_key_is_final.keys()
1379 filtered_keys = filtered_key_is_final.keys()
1381
1380
1382 if not prefix:
1381 if not prefix:
1383 return "", 0, {repr(k): v for k, v in filtered_key_is_final.items()}
1382 return "", 0, {repr(k): v for k, v in filtered_key_is_final.items()}
1384
1383
1385 quote_match = re.search("(?:\"|')", prefix)
1384 quote_match = re.search("(?:\"|')", prefix)
1386 is_user_prefix_numeric = False
1385 is_user_prefix_numeric = False
1387
1386
1388 if quote_match:
1387 if quote_match:
1389 quote = quote_match.group()
1388 quote = quote_match.group()
1390 valid_prefix = prefix + quote
1389 valid_prefix = prefix + quote
1391 try:
1390 try:
1392 prefix_str = literal_eval(valid_prefix)
1391 prefix_str = literal_eval(valid_prefix)
1393 except Exception:
1392 except Exception:
1394 return "", 0, {}
1393 return "", 0, {}
1395 else:
1394 else:
1396 # If it does not look like a string, let's assume
1395 # If it does not look like a string, let's assume
1397 # we are dealing with a number or variable.
1396 # we are dealing with a number or variable.
1398 number_match = _match_number_in_dict_key_prefix(prefix)
1397 number_match = _match_number_in_dict_key_prefix(prefix)
1399
1398
1400 # We do not want the key matcher to suggest variable names so we return no matches:
1399 # We do not want the key matcher to suggest variable names so we return no matches:
1401 if number_match is None:
1400 if number_match is None:
1402 # The alternative would be to assume that the user forgot the quote
1401 # The alternative would be to assume that the user forgot the quote
1403 # and if the substring matches, suggest adding it at the start.
1402 # and if the substring matches, suggest adding it at the start.
1404 return "", 0, {}
1403 return "", 0, {}
1405
1404
1406 prefix_str = number_match
1405 prefix_str = number_match
1407 is_user_prefix_numeric = True
1406 is_user_prefix_numeric = True
1408 quote = ""
1407 quote = ""
1409
1408
1410 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
1409 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
1411 token_match = re.search(pattern, prefix, re.UNICODE)
1410 token_match = re.search(pattern, prefix, re.UNICODE)
1412 assert token_match is not None # silence mypy
1411 assert token_match is not None # silence mypy
1413 token_start = token_match.start()
1412 token_start = token_match.start()
1414 token_prefix = token_match.group()
1413 token_prefix = token_match.group()
1415
1414
1416 matched: Dict[str, _DictKeyState] = {}
1415 matched: Dict[str, _DictKeyState] = {}
1417
1416
1418 str_key: Union[str, bytes]
1417 str_key: Union[str, bytes]
1419
1418
1420 for key in filtered_keys:
1419 for key in filtered_keys:
1421 if isinstance(key, (int, float)):
1420 if isinstance(key, (int, float)):
1422 # This key is a number but the user did not type a number; skip it.
1421 # This key is a number but the user did not type a number; skip it.
1423 if not is_user_prefix_numeric:
1422 if not is_user_prefix_numeric:
1424 continue
1423 continue
1425 str_key = str(key)
1424 str_key = str(key)
1426 if isinstance(key, int):
1425 if isinstance(key, int):
1427 int_base = prefix_str[:2].lower()
1426 int_base = prefix_str[:2].lower()
1428 # if user typed integer using binary/oct/hex notation:
1427 # if user typed integer using binary/oct/hex notation:
1429 if int_base in _INT_FORMATS:
1428 if int_base in _INT_FORMATS:
1430 int_format = _INT_FORMATS[int_base]
1429 int_format = _INT_FORMATS[int_base]
1431 str_key = int_format(key)
1430 str_key = int_format(key)
1432 else:
1431 else:
1433 # The user typed a number but this key is a string; skip it.
1432 # The user typed a number but this key is a string; skip it.
1434 if is_user_prefix_numeric:
1433 if is_user_prefix_numeric:
1435 continue
1434 continue
1436 str_key = key
1435 str_key = key
1437 try:
1436 try:
1438 if not str_key.startswith(prefix_str):
1437 if not str_key.startswith(prefix_str):
1439 continue
1438 continue
1440 except (AttributeError, TypeError, UnicodeError) as e:
1439 except (AttributeError, TypeError, UnicodeError) as e:
1441 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
1440 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
1442 continue
1441 continue
1443
1442
1444 # reformat remainder of key to begin with prefix
1443 # reformat remainder of key to begin with prefix
1445 rem = str_key[len(prefix_str) :]
1444 rem = str_key[len(prefix_str) :]
1446 # force repr wrapped in '
1445 # force repr wrapped in '
1447 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
1446 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
1448 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
1447 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
1449 if quote == '"':
1448 if quote == '"':
1450 # The entered prefix is quoted with ",
1449 # The entered prefix is quoted with ",
1451 # but the match is quoted with '.
1450 # but the match is quoted with '.
1452 # A contained " hence needs escaping for comparison:
1451 # A contained " hence needs escaping for comparison:
1453 rem_repr = rem_repr.replace('"', '\\"')
1452 rem_repr = rem_repr.replace('"', '\\"')
1454
1453
1455 # then reinsert prefix from start of token
1454 # then reinsert prefix from start of token
1456 match = "%s%s" % (token_prefix, rem_repr)
1455 match = "%s%s" % (token_prefix, rem_repr)
1457
1456
1458 matched[match] = filtered_key_is_final[key]
1457 matched[match] = filtered_key_is_final[key]
1459 return quote, token_start, matched
1458 return quote, token_start, matched
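
# Illustrative sketch: completing string keys after an opening quote; any
# delimiter string containing the quote characters (such as ``DELIMS`` above)
# gives the same result here.
def _example_match_dict_keys():
    quote, token_start, matched = match_dict_keys(["abc", "abd"], "'a", DELIMS)
    # quote == "'", token_start == 1, and both keys are offered:
    # matched ~= {"abc": END_OF_ITEM, "abd": END_OF_ITEM}
    return quote, token_start, matched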
1460
1459
1461
1460
1462 def cursor_to_position(text:str, line:int, column:int)->int:
1461 def cursor_to_position(text:str, line:int, column:int)->int:
1463 """
1462 """
1464 Convert the (line,column) position of the cursor in text to an offset in a
1463 Convert the (line,column) position of the cursor in text to an offset in a
1465 string.
1464 string.
1466
1465
1467 Parameters
1466 Parameters
1468 ----------
1467 ----------
1469 text : str
1468 text : str
1470 The text in which to calculate the cursor offset
1469 The text in which to calculate the cursor offset
1471 line : int
1470 line : int
1472 Line of the cursor; 0-indexed
1471 Line of the cursor; 0-indexed
1473 column : int
1472 column : int
1474 Column of the cursor 0-indexed
1473 Column of the cursor 0-indexed
1475
1474
1476 Returns
1475 Returns
1477 -------
1476 -------
1478 Position of the cursor in ``text``, 0-indexed.
1477 Position of the cursor in ``text``, 0-indexed.
1479
1478
1480 See Also
1479 See Also
1481 --------
1480 --------
1482 position_to_cursor : reciprocal of this function
1481 position_to_cursor : reciprocal of this function
1483
1482
1484 """
1483 """
1485 lines = text.split('\n')
1484 lines = text.split('\n')
1486 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
1485 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
1487
1486
1488 return sum(len(l) + 1 for l in lines[:line]) + column
1487 return sum(len(l) + 1 for l in lines[:line]) + column
1489
1488
1490 def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
1489 def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
1491 """
1490 """
1492 Convert the position of the cursor in text (0-indexed) to a line
1491 Convert the position of the cursor in text (0-indexed) to a line
1493 number (0-indexed) and a column number (0-indexed) pair
1492 number (0-indexed) and a column number (0-indexed) pair
1494
1493
1495 Position should be a valid position in ``text``.
1494 Position should be a valid position in ``text``.
1496
1495
1497 Parameters
1496 Parameters
1498 ----------
1497 ----------
1499 text : str
1498 text : str
1500 The text in which to calculate the cursor offset
1499 The text in which to calculate the cursor offset
1501 offset : int
1500 offset : int
1502 Position of the cursor in ``text``, 0-indexed.
1501 Position of the cursor in ``text``, 0-indexed.
1503
1502
1504 Returns
1503 Returns
1505 -------
1504 -------
1506 (line, column) : (int, int)
1505 (line, column) : (int, int)
1507 Line of the cursor; 0-indexed, column of the cursor 0-indexed
1506 Line of the cursor; 0-indexed, column of the cursor 0-indexed
1508
1507
1509 See Also
1508 See Also
1510 --------
1509 --------
1511 cursor_to_position : reciprocal of this function
1510 cursor_to_position : reciprocal of this function
1512
1511
1513 """
1512 """
1514
1513
1515 assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
1514 assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
1516
1515
1517 before = text[:offset]
1516 before = text[:offset]
1518 blines = before.split('\n')  # ! splitlines trims trailing \n
1517 blines = before.split('\n')  # ! splitlines trims trailing \n
1519 line = before.count('\n')
1518 line = before.count('\n')
1520 col = len(blines[-1])
1519 col = len(blines[-1])
1521 return line, col
1520 return line, col
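
# Illustrative sketch: the two helpers above are inverses of each other.
def _example_cursor_round_trip():
    text = "ab\ncd"
    offset = cursor_to_position(text, 1, 1)   # line 1, column 1 -> index of "d"
    assert offset == 4
    assert position_to_cursor(text, offset) == (1, 1)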
1522
1521
1523
1522
1524 def _safe_isinstance(obj, module, class_name, *attrs):
1523 def _safe_isinstance(obj, module, class_name, *attrs):
1525 """Checks if obj is an instance of module.class_name if loaded
1524 """Checks if obj is an instance of module.class_name if loaded
1526 """
1525 """
1527 if module in sys.modules:
1526 if module in sys.modules:
1528 m = sys.modules[module]
1527 m = sys.modules[module]
1529 for attr in [class_name, *attrs]:
1528 for attr in [class_name, *attrs]:
1530 m = getattr(m, attr)
1529 m = getattr(m, attr)
1531 return isinstance(obj, m)
1530 return isinstance(obj, m)
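
# Illustrative sketch: the check never imports anything; it only inspects
# modules that have already been loaded, so it returns None (falsy) when
# e.g. numpy was never imported.
def _example_safe_isinstance(obj):
    return _safe_isinstance(obj, "numpy", "ndarray")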
1532
1531
1533
1532
1534 @context_matcher()
1533 @context_matcher()
1535 def back_unicode_name_matcher(context: CompletionContext):
1534 def back_unicode_name_matcher(context: CompletionContext):
1536 """Match Unicode characters back to Unicode name
1535 """Match Unicode characters back to Unicode name
1537
1536
1538 Same as :any:`back_unicode_name_matches`, but adapted to the new Matcher API.
1537 Same as :any:`back_unicode_name_matches`, but adapted to the new Matcher API.
1539 """
1538 """
1540 fragment, matches = back_unicode_name_matches(context.text_until_cursor)
1539 fragment, matches = back_unicode_name_matches(context.text_until_cursor)
1541 return _convert_matcher_v1_result_to_v2(
1540 return _convert_matcher_v1_result_to_v2(
1542 matches, type="unicode", fragment=fragment, suppress_if_matches=True
1541 matches, type="unicode", fragment=fragment, suppress_if_matches=True
1543 )
1542 )
1544
1543
1545
1544
1546 def back_unicode_name_matches(text: str) -> Tuple[str, Sequence[str]]:
1545 def back_unicode_name_matches(text: str) -> Tuple[str, Sequence[str]]:
1547 """Match Unicode characters back to Unicode name
1546 """Match Unicode characters back to Unicode name
1548
1547
1549 This does ``β˜ƒ`` -> ``\\snowman``
1548 This does ``β˜ƒ`` -> ``\\snowman``
1550
1549
1551 Note that snowman is not a valid python3 combining character but will be expanded.
1550 Note that snowman is not a valid python3 combining character but will be expanded.
1552 It will, however, not be recombined back into the snowman character by the completion machinery.
1551 It will, however, not be recombined back into the snowman character by the completion machinery.
1553
1552
1554 Nor will this back-complete standard escape sequences like \\n, \\b ...
1553 Nor will this back-complete standard escape sequences like \\n, \\b ...
1555
1554
1556 .. deprecated:: 8.6
1555 .. deprecated:: 8.6
1557 You can use :meth:`back_unicode_name_matcher` instead.
1556 You can use :meth:`back_unicode_name_matcher` instead.
1558
1557
1559 Returns
1558 Returns
1560 -------
1559 -------
1561
1560
1562 Return a tuple with two elements:
1561 Return a tuple with two elements:
1563
1562
1564 - The Unicode character that was matched (preceded with a backslash), or
1563 - The Unicode character that was matched (preceded with a backslash), or
1565 empty string,
1564 empty string,
1566 - a sequence (of length 1) with the name of the matched Unicode character,
1565 - a sequence (of length 1) with the name of the matched Unicode character,
1567 preceded by a backslash, or empty if no match.
1566 preceded by a backslash, or empty if no match.
1568 """
1567 """
1569 if len(text)<2:
1568 if len(text)<2:
1570 return '', ()
1569 return '', ()
1571 maybe_slash = text[-2]
1570 maybe_slash = text[-2]
1572 if maybe_slash != '\\':
1571 if maybe_slash != '\\':
1573 return '', ()
1572 return '', ()
1574
1573
1575 char = text[-1]
1574 char = text[-1]
1576 # no expand on quote for completion in strings.
1575 # no expand on quote for completion in strings.
1577 # nor backcomplete standard ascii keys
1576 # nor backcomplete standard ascii keys
1578 if char in string.ascii_letters or char in ('"',"'"):
1577 if char in string.ascii_letters or char in ('"',"'"):
1579 return '', ()
1578 return '', ()
1580 try :
1579 try :
1581 unic = unicodedata.name(char)
1580 unic = unicodedata.name(char)
1582 return '\\'+char,('\\'+unic,)
1581 return '\\'+char,('\\'+unic,)
1583 except KeyError:
1582 except KeyError:
1584 pass
1583 pass
1585 return '', ()
1584 return '', ()
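
# Illustrative sketch of the (deprecated) helper above:
def _example_back_unicode():
    assert back_unicode_name_matches("x = \\β˜ƒ") == ("\\β˜ƒ", ("\\SNOWMAN",))
    assert back_unicode_name_matches("x = \\n") == ("", ())   # ascii escapes are skipped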
1586
1585
1587
1586
1588 @context_matcher()
1587 @context_matcher()
1589 def back_latex_name_matcher(context: CompletionContext):
1588 def back_latex_name_matcher(context: CompletionContext):
1590 """Match latex characters back to unicode name
1589 """Match latex characters back to unicode name
1591
1590
1592 Same as :any:`back_latex_name_matches`, but adapted to the new Matcher API.
1591 Same as :any:`back_latex_name_matches`, but adapted to the new Matcher API.
1593 """
1592 """
1594 fragment, matches = back_latex_name_matches(context.text_until_cursor)
1593 fragment, matches = back_latex_name_matches(context.text_until_cursor)
1595 return _convert_matcher_v1_result_to_v2(
1594 return _convert_matcher_v1_result_to_v2(
1596 matches, type="latex", fragment=fragment, suppress_if_matches=True
1595 matches, type="latex", fragment=fragment, suppress_if_matches=True
1597 )
1596 )
1598
1597
1599
1598
1600 def back_latex_name_matches(text: str) -> Tuple[str, Sequence[str]]:
1599 def back_latex_name_matches(text: str) -> Tuple[str, Sequence[str]]:
1601 """Match latex characters back to unicode name
1600 """Match latex characters back to unicode name
1602
1601
1603 This does ``\\β„΅`` -> ``\\aleph``
1602 This does ``\\β„΅`` -> ``\\aleph``
1604
1603
1605 .. deprecated:: 8.6
1604 .. deprecated:: 8.6
1606 You can use :meth:`back_latex_name_matcher` instead.
1605 You can use :meth:`back_latex_name_matcher` instead.
1607 """
1606 """
1608 if len(text)<2:
1607 if len(text)<2:
1609 return '', ()
1608 return '', ()
1610 maybe_slash = text[-2]
1609 maybe_slash = text[-2]
1611 if maybe_slash != '\\':
1610 if maybe_slash != '\\':
1612 return '', ()
1611 return '', ()
1613
1612
1614
1613
1615 char = text[-1]
1614 char = text[-1]
1616 # no expand on quote for completion in strings.
1615 # no expand on quote for completion in strings.
1617 # nor backcomplete standard ascii keys
1616 # nor backcomplete standard ascii keys
1618 if char in string.ascii_letters or char in ('"',"'"):
1617 if char in string.ascii_letters or char in ('"',"'"):
1619 return '', ()
1618 return '', ()
1620 try :
1619 try :
1621 latex = reverse_latex_symbol[char]
1620 latex = reverse_latex_symbol[char]
1622 # the latex name starts with '\\' and thus replaces the typed backslash as well
1621 # the latex name starts with '\\' and thus replaces the typed backslash as well
1623 return '\\'+char,[latex]
1622 return '\\'+char,[latex]
1624 except KeyError:
1623 except KeyError:
1625 pass
1624 pass
1626 return '', ()
1625 return '', ()
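
# Illustrative sketch, same shape as the unicode variant above; assumes the
# ``reverse_latex_symbol`` table maps β„΅ back to ``\aleph``:
def _example_back_latex():
    fragment, matches = back_latex_name_matches("x = \\β„΅")
    # fragment == "\\β„΅" and matches == ["\\aleph"]
    return fragment, matches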
1627
1626
1628
1627
1629 def _formatparamchildren(parameter) -> str:
1628 def _formatparamchildren(parameter) -> str:
1630 """
1629 """
1631 Get parameter name and value from Jedi Private API
1630 Get parameter name and value from Jedi Private API
1632
1631
1633 Jedi does not expose a simple way to get `param=value` from its API.
1632 Jedi does not expose a simple way to get `param=value` from its API.
1634
1633
1635 Parameters
1634 Parameters
1636 ----------
1635 ----------
1637 parameter
1636 parameter
1638 Jedi's function `Param`
1637 Jedi's function `Param`
1639
1638
1640 Returns
1639 Returns
1641 -------
1640 -------
1642 A string like 'a', 'b=1', '*args', '**kwargs'
1641 A string like 'a', 'b=1', '*args', '**kwargs'
1643
1642
1644 """
1643 """
1645 description = parameter.description
1644 description = parameter.description
1646 if not description.startswith('param '):
1645 if not description.startswith('param '):
1647 raise ValueError('Jedi function parameter description has changed format. '
1646 raise ValueError('Jedi function parameter description has changed format. '
1648 'Expected "param ...", found %r.' % description)
1647 'Expected "param ...", found %r.' % description)
1649 return description[6:]
1648 return description[6:]
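# Illustrative sketch: Jedi describes parameters as "param <name>[=<value>]",
# so stripping the 6-character "param " prefix yields the display form used
# when building signatures (the parameter objects below are hypothetical).
#
#     _formatparamchildren(param_b)       # description "param b=1"      -> 'b=1'
#     _formatparamchildren(param_kwargs)  # description "param **kwargs" -> '**kwargs'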
1650
1649
1651 def _make_signature(completion)-> str:
1650 def _make_signature(completion)-> str:
1652 """
1651 """
1653 Make the signature from a jedi completion
1652 Make the signature from a jedi completion
1654
1653
1655 Parameters
1654 Parameters
1656 ----------
1655 ----------
1657 completion : jedi.Completion
1656 completion : jedi.Completion
1658 the Jedi completion object; it need not resolve to a callable (``'(?)'`` is returned when no signature is found)
1657 the Jedi completion object; it need not resolve to a callable (``'(?)'`` is returned when no signature is found)
1659
1658
1660 Returns
1659 Returns
1661 -------
1660 -------
1662 a string consisting of the function signature, with the parentheses but
1661 a string consisting of the function signature, with the parentheses but
1663 without the function name. Example:
1662 without the function name. Example:
1664 `(a, *args, b=1, **kwargs)`
1663 `(a, *args, b=1, **kwargs)`
1665
1664
1666 """
1665 """
1667
1666
1668 # it looks like this might work on jedi 0.17
1667 # it looks like this might work on jedi 0.17
1669 if hasattr(completion, 'get_signatures'):
1668 if hasattr(completion, 'get_signatures'):
1670 signatures = completion.get_signatures()
1669 signatures = completion.get_signatures()
1671 if not signatures:
1670 if not signatures:
1672 return '(?)'
1671 return '(?)'
1673
1672
1674 c0 = completion.get_signatures()[0]
1673 c0 = completion.get_signatures()[0]
1675 return '('+c0.to_string().split('(', maxsplit=1)[1]
1674 return '('+c0.to_string().split('(', maxsplit=1)[1]
1676
1675
1677 return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
1676 return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
1678 for p in signature.defined_names()) if f])
1677 for p in signature.defined_names()) if f])
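# Illustrative sketch (``completion_for_f`` is a hypothetical jedi.Completion
# for ``def f(a, *args, b=1, **kwargs)``): the result is the docstring's own
# example, and '(?)' is returned when Jedi reports no signature.
#
#     _make_signature(completion_for_f)   # -> '(a, *args, b=1, **kwargs)'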
1679
1678
1680
1679
1681 _CompleteResult = Dict[str, MatcherResult]
1680 _CompleteResult = Dict[str, MatcherResult]
1682
1681
1683
1682
1684 DICT_MATCHER_REGEX = re.compile(
1683 DICT_MATCHER_REGEX = re.compile(
1685 r"""(?x)
1684 r"""(?x)
1686 ( # match dict-referring - or any get item object - expression
1685 ( # match dict-referring - or any get item object - expression
1687 .+
1686 .+
1688 )
1687 )
1689 \[ # open bracket
1688 \[ # open bracket
1690 \s* # and optional whitespace
1689 \s* # and optional whitespace
1691 # Capture any number of serializable objects (e.g. "a", "b", 'c')
1690 # Capture any number of serializable objects (e.g. "a", "b", 'c')
1692 # and slices
1691 # and slices
1693 ((?:(?:
1692 ((?:(?:
1694 (?: # closed string
1693 (?: # closed string
1695 [uUbB]? # string prefix (r not handled)
1694 [uUbB]? # string prefix (r not handled)
1696 (?:
1695 (?:
1697 '(?:[^']|(?<!\\)\\')*'
1696 '(?:[^']|(?<!\\)\\')*'
1698 |
1697 |
1699 "(?:[^"]|(?<!\\)\\")*"
1698 "(?:[^"]|(?<!\\)\\")*"
1700 )
1699 )
1701 )
1700 )
1702 |
1701 |
1703 # capture integers and slices
1702 # capture integers and slices
1704 (?:[-+]?\d+)?(?::(?:[-+]?\d+)?){0,2}
1703 (?:[-+]?\d+)?(?::(?:[-+]?\d+)?){0,2}
1705 |
1704 |
1706 # integer in bin/hex/oct notation
1705 # integer in bin/hex/oct notation
1707 0[bBxXoO]_?(?:\w|\d)+
1706 0[bBxXoO]_?(?:\w|\d)+
1708 )
1707 )
1709 \s*,\s*
1708 \s*,\s*
1710 )*)
1709 )*)
1711 ((?:
1710 ((?:
1712 (?: # unclosed string
1711 (?: # unclosed string
1713 [uUbB]? # string prefix (r not handled)
1712 [uUbB]? # string prefix (r not handled)
1714 (?:
1713 (?:
1715 '(?:[^']|(?<!\\)\\')*
1714 '(?:[^']|(?<!\\)\\')*
1716 |
1715 |
1717 "(?:[^"]|(?<!\\)\\")*
1716 "(?:[^"]|(?<!\\)\\")*
1718 )
1717 )
1719 )
1718 )
1720 |
1719 |
1721 # unfinished integer
1720 # unfinished integer
1722 (?:[-+]?\d+)
1721 (?:[-+]?\d+)
1723 |
1722 |
1724 # integer in bin/hex/oct notation
1723 # integer in bin/hex/oct notation
1725 0[bBxXoO]_?(?:\w|\d)+
1724 0[bBxXoO]_?(?:\w|\d)+
1726 )
1725 )
1727 )?
1726 )?
1728 $
1727 $
1729 """
1728 """
1730 )
1729 )
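# Illustrative sketch of the capture groups (not exhaustively checked against
# edge cases): group 1 is the subscripted expression, group 2 the keys that
# are already closed (including their trailing commas), group 3 the
# unfinished key prefix.
#
#     DICT_MATCHER_REGEX.match("my_dict['ke").groups()
#     # -> ('my_dict', '', "'ke")
#     DICT_MATCHER_REGEX.match('data["a", "b').groups()
#     # -> ('data', '"a", ', '"b')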
1731
1730
1732
1731
1733 def _convert_matcher_v1_result_to_v2(
1732 def _convert_matcher_v1_result_to_v2(
1734 matches: Sequence[str],
1733 matches: Sequence[str],
1735 type: str,
1734 type: str,
1736 fragment: Optional[str] = None,
1735 fragment: Optional[str] = None,
1737 suppress_if_matches: bool = False,
1736 suppress_if_matches: bool = False,
1738 ) -> SimpleMatcherResult:
1737 ) -> SimpleMatcherResult:
1739 """Utility to help with transition"""
1738 """Utility to help with transition"""
1740 result = {
1739 result = {
1741 "completions": [SimpleCompletion(text=match, type=type) for match in matches],
1740 "completions": [SimpleCompletion(text=match, type=type) for match in matches],
1742 "suppress": (True if matches else False) if suppress_if_matches else False,
1741 "suppress": (True if matches else False) if suppress_if_matches else False,
1743 }
1742 }
1744 if fragment is not None:
1743 if fragment is not None:
1745 result["matched_fragment"] = fragment
1744 result["matched_fragment"] = fragment
1746 return cast(SimpleMatcherResult, result)
1745 return cast(SimpleMatcherResult, result)
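# Illustrative sketch of the conversion (literal values are made up):
#
#     result = _convert_matcher_v1_result_to_v2(
#         ["%time", "%timeit"], type="magic", fragment="%ti",
#         suppress_if_matches=True,
#     )
#     # result["completions"]      -> [SimpleCompletion(text="%time", type="magic"), ...]
#     # result["suppress"]         -> True (matches found and suppress_if_matches set)
#     # result["matched_fragment"] -> "%ti"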
1747
1746
1748
1747
1749 class IPCompleter(Completer):
1748 class IPCompleter(Completer):
1750 """Extension of the completer class with IPython-specific features"""
1749 """Extension of the completer class with IPython-specific features"""
1751
1750
1752 @observe('greedy')
1751 @observe('greedy')
1753 def _greedy_changed(self, change):
1752 def _greedy_changed(self, change):
1754 """update the splitter and readline delims when greedy is changed"""
1753 """update the splitter and readline delims when greedy is changed"""
1755 if change["new"]:
1754 if change["new"]:
1756 self.evaluation = "unsafe"
1755 self.evaluation = "unsafe"
1757 self.auto_close_dict_keys = True
1756 self.auto_close_dict_keys = True
1758 self.splitter.delims = GREEDY_DELIMS
1757 self.splitter.delims = GREEDY_DELIMS
1759 else:
1758 else:
1760 self.evaluation = "limited"
1759 self.evaluation = "limited"
1761 self.auto_close_dict_keys = False
1760 self.auto_close_dict_keys = False
1762 self.splitter.delims = DELIMS
1761 self.splitter.delims = DELIMS
1763
1762
1764 dict_keys_only = Bool(
1763 dict_keys_only = Bool(
1765 False,
1764 False,
1766 help="""
1765 help="""
1767 Whether to show dict key matches only.
1766 Whether to show dict key matches only.
1768
1767
1769 (disables all matchers except for `IPCompleter.dict_key_matcher`).
1768 (disables all matchers except for `IPCompleter.dict_key_matcher`).
1770 """,
1769 """,
1771 )
1770 )
1772
1771
1773 suppress_competing_matchers = UnionTrait(
1772 suppress_competing_matchers = UnionTrait(
1774 [Bool(allow_none=True), DictTrait(Bool(None, allow_none=True))],
1773 [Bool(allow_none=True), DictTrait(Bool(None, allow_none=True))],
1775 default_value=None,
1774 default_value=None,
1776 help="""
1775 help="""
1777 Whether to suppress completions from other *Matchers*.
1776 Whether to suppress completions from other *Matchers*.
1778
1777
1779 When set to ``None`` (default) the matchers will attempt to auto-detect
1778 When set to ``None`` (default) the matchers will attempt to auto-detect
1780 whether suppression of other matchers is desirable. For example, at
1779 whether suppression of other matchers is desirable. For example, at
1781 the beginning of a line followed by `%` we expect a magic completion
1780 the beginning of a line followed by `%` we expect a magic completion
1782 to be the only applicable option, and after ``my_dict['`` we usually
1781 to be the only applicable option, and after ``my_dict['`` we usually
1783 expect a completion with an existing dictionary key.
1782 expect a completion with an existing dictionary key.
1784
1783
1785 If you want to disable this heuristic and see completions from all matchers,
1784 If you want to disable this heuristic and see completions from all matchers,
1786 set ``IPCompleter.suppress_competing_matchers = False``.
1785 set ``IPCompleter.suppress_competing_matchers = False``.
1787 To disable the heuristic for specific matchers provide a dictionary mapping:
1786 To disable the heuristic for specific matchers provide a dictionary mapping:
1788 ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher': False}``.
1787 ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher': False}``.
1789
1788
1790 Set ``IPCompleter.suppress_competing_matchers = True`` to limit
1789 Set ``IPCompleter.suppress_competing_matchers = True`` to limit
1791 completions to the set of matchers with the highest priority;
1790 completions to the set of matchers with the highest priority;
1792 this is equivalent to ``IPCompleter.merge_completions = False`` and
1791 this is equivalent to ``IPCompleter.merge_completions = False`` and
1793 can be beneficial for performance, but will sometimes omit relevant
1792 can be beneficial for performance, but will sometimes omit relevant
1794 candidates from matchers further down the priority list.
1793 candidates from matchers further down the priority list.
1795 """,
1794 """,
1796 ).tag(config=True)
1795 ).tag(config=True)
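# Hedged configuration sketch (e.g. in an ``ipython_config.py``); the matcher
# identifier is the one quoted in the help text above:
#
#     c.IPCompleter.suppress_competing_matchers = {
#         "IPCompleter.dict_key_matcher": False,
#     }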
1797
1796
1798 merge_completions = Bool(
1797 merge_completions = Bool(
1799 True,
1798 True,
1800 help="""Whether to merge completion results into a single list
1799 help="""Whether to merge completion results into a single list
1801
1800
1802 If False, only the completion results from the first non-empty
1801 If False, only the completion results from the first non-empty
1803 completer will be returned.
1802 completer will be returned.
1804
1803
1805 As of version 8.6.0, setting the value to ``False`` is an alias for:
1804 As of version 8.6.0, setting the value to ``False`` is an alias for:
1806 ``IPCompleter.suppress_competing_matchers = True``.
1805 ``IPCompleter.suppress_competing_matchers = True``.
1807 """,
1806 """,
1808 ).tag(config=True)
1807 ).tag(config=True)
1809
1808
1810 disable_matchers = ListTrait(
1809 disable_matchers = ListTrait(
1811 Unicode(),
1810 Unicode(),
1812 help="""List of matchers to disable.
1811 help="""List of matchers to disable.
1813
1812
1814 The list should contain matcher identifiers (see :any:`completion_matcher`).
1813 The list should contain matcher identifiers (see :any:`completion_matcher`).
1815 """,
1814 """,
1816 ).tag(config=True)
1815 ).tag(config=True)
1817
1816
1818 omit__names = Enum(
1817 omit__names = Enum(
1819 (0, 1, 2),
1818 (0, 1, 2),
1820 default_value=2,
1819 default_value=2,
1821 help="""Instruct the completer to omit private method names
1820 help="""Instruct the completer to omit private method names
1822
1821
1823 Specifically, when completing on ``object.<tab>``.
1822 Specifically, when completing on ``object.<tab>``.
1824
1823
1825 When 2 [default]: all names that start with '_' will be excluded.
1824 When 2 [default]: all names that start with '_' will be excluded.
1826
1825
1827 When 1: all 'magic' names (``__foo__``) will be excluded.
1826 When 1: all 'magic' names (``__foo__``) will be excluded.
1828
1827
1829 When 0: nothing will be excluded.
1828 When 0: nothing will be excluded.
1830 """
1829 """
1831 ).tag(config=True)
1830 ).tag(config=True)
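# Illustrative effect (rough sketch): for an object exposing ``public``,
# ``_private`` and ``__dunder__``, completing on ``obj.<tab>`` behaves as:
#
#     omit__names == 2  ->  only 'obj.public'
#     omit__names == 1  ->  'obj.public' and 'obj._private' (dunders hidden)
#     omit__names == 0  ->  all three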
1832 limit_to__all__ = Bool(False,
1831 limit_to__all__ = Bool(False,
1833 help="""
1832 help="""
1834 DEPRECATED as of version 5.0.
1833 DEPRECATED as of version 5.0.
1835
1834
1836 Instruct the completer to use __all__ for the completion
1835 Instruct the completer to use __all__ for the completion
1837
1836
1838 Specifically, when completing on ``object.<tab>``.
1837 Specifically, when completing on ``object.<tab>``.
1839
1838
1840 When True: only those names in obj.__all__ will be included.
1839 When True: only those names in obj.__all__ will be included.
1841
1840
1842 When False [default]: the __all__ attribute is ignored.
1841 When False [default]: the __all__ attribute is ignored.
1843 """,
1842 """,
1844 ).tag(config=True)
1843 ).tag(config=True)
1845
1844
1846 profile_completions = Bool(
1845 profile_completions = Bool(
1847 default_value=False,
1846 default_value=False,
1848 help="If True, emit profiling data for completion subsystem using cProfile."
1847 help="If True, emit profiling data for completion subsystem using cProfile."
1849 ).tag(config=True)
1848 ).tag(config=True)
1850
1849
1851 profiler_output_dir = Unicode(
1850 profiler_output_dir = Unicode(
1852 default_value=".completion_profiles",
1851 default_value=".completion_profiles",
1853 help="Template for path at which to output profile data for completions."
1852 help="Template for path at which to output profile data for completions."
1854 ).tag(config=True)
1853 ).tag(config=True)
1855
1854
1856 @observe('limit_to__all__')
1855 @observe('limit_to__all__')
1857 def _limit_to_all_changed(self, change):
1856 def _limit_to_all_changed(self, change):
1858 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
1857 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
1859 'value has been deprecated since IPython 5.0; it will stop having '
1858 'value has been deprecated since IPython 5.0; it will stop having '
1860 'any effect and will be removed in a future version of IPython.',
1859 'any effect and will be removed in a future version of IPython.',
1861 UserWarning)
1860 UserWarning)
1862
1861
1863 def __init__(
1862 def __init__(
1864 self, shell=None, namespace=None, global_namespace=None, config=None, **kwargs
1863 self, shell=None, namespace=None, global_namespace=None, config=None, **kwargs
1865 ):
1864 ):
1866 """IPCompleter() -> completer
1865 """IPCompleter() -> completer
1867
1866
1868 Return a completer object.
1867 Return a completer object.
1869
1868
1870 Parameters
1869 Parameters
1871 ----------
1870 ----------
1872 shell
1871 shell
1873 a pointer to the ipython shell itself. This is needed
1872 a pointer to the ipython shell itself. This is needed
1874 because this completer knows about magic functions, and those can
1873 because this completer knows about magic functions, and those can
1875 only be accessed via the ipython instance.
1874 only be accessed via the ipython instance.
1876 namespace : dict, optional
1875 namespace : dict, optional
1877 an optional namespace dict in which completions are looked up.
1876 an optional namespace dict in which completions are looked up.
1878 global_namespace : dict, optional
1877 global_namespace : dict, optional
1879 secondary optional dict for completions, to
1878 secondary optional dict for completions, to
1880 handle cases (such as IPython embedded inside functions) where
1879 handle cases (such as IPython embedded inside functions) where
1881 both Python scopes are visible.
1880 both Python scopes are visible.
1882 config : Config
1881 config : Config
1883 traitlets config object
1882 traitlets config object
1884 **kwargs
1883 **kwargs
1885 passed to super class unmodified.
1884 passed to super class unmodified.
1886 """
1885 """
1887
1886
1888 self.magic_escape = ESC_MAGIC
1887 self.magic_escape = ESC_MAGIC
1889 self.splitter = CompletionSplitter()
1888 self.splitter = CompletionSplitter()
1890
1889
1891 # _greedy_changed() depends on splitter and readline being defined:
1890 # _greedy_changed() depends on splitter and readline being defined:
1892 super().__init__(
1891 super().__init__(
1893 namespace=namespace,
1892 namespace=namespace,
1894 global_namespace=global_namespace,
1893 global_namespace=global_namespace,
1895 config=config,
1894 config=config,
1896 **kwargs,
1895 **kwargs,
1897 )
1896 )
1898
1897
1899 # List where completion matches will be stored
1898 # List where completion matches will be stored
1900 self.matches = []
1899 self.matches = []
1901 self.shell = shell
1900 self.shell = shell
1902 # Regexp to split filenames with spaces in them
1901 # Regexp to split filenames with spaces in them
1903 self.space_name_re = re.compile(r'([^\\] )')
1902 self.space_name_re = re.compile(r'([^\\] )')
1904 # Hold a local ref. to glob.glob for speed
1903 # Hold a local ref. to glob.glob for speed
1905 self.glob = glob.glob
1904 self.glob = glob.glob
1906
1905
1907 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1906 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1908 # buffers, to avoid completion problems.
1907 # buffers, to avoid completion problems.
1909 term = os.environ.get('TERM','xterm')
1908 term = os.environ.get('TERM','xterm')
1910 self.dumb_terminal = term in ['dumb','emacs']
1909 self.dumb_terminal = term in ['dumb','emacs']
1911
1910
1912 # Special handling of backslashes needed in win32 platforms
1911 # Special handling of backslashes needed in win32 platforms
1913 if sys.platform == "win32":
1912 if sys.platform == "win32":
1914 self.clean_glob = self._clean_glob_win32
1913 self.clean_glob = self._clean_glob_win32
1915 else:
1914 else:
1916 self.clean_glob = self._clean_glob
1915 self.clean_glob = self._clean_glob
1917
1916
1918 #regexp to parse docstring for function signature
1917 #regexp to parse docstring for function signature
1919 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1918 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1920 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1919 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1921 #use this if positional argument name is also needed
1920 #use this if positional argument name is also needed
1922 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1921 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1923
1922
1924 self.magic_arg_matchers = [
1923 self.magic_arg_matchers = [
1925 self.magic_config_matcher,
1924 self.magic_config_matcher,
1926 self.magic_color_matcher,
1925 self.magic_color_matcher,
1927 ]
1926 ]
1928
1927
1929 # This is set externally by InteractiveShell
1928 # This is set externally by InteractiveShell
1930 self.custom_completers = None
1929 self.custom_completers = None
1931
1930
1932 # This is a list of names of unicode characters that can be completed
1931 # This is a list of names of unicode characters that can be completed
1933 # into their corresponding unicode value. The list is large, so we
1932 # into their corresponding unicode value. The list is large, so we
1934 # lazily initialize it on first use. Consuming code should access this
1933 # lazily initialize it on first use. Consuming code should access this
1935 # attribute through the `unicode_names` property.
1934 # attribute through the `unicode_names` property.
1936 self._unicode_names = None
1935 self._unicode_names = None
1937
1936
1938 self._backslash_combining_matchers = [
1937 self._backslash_combining_matchers = [
1939 self.latex_name_matcher,
1938 self.latex_name_matcher,
1940 self.unicode_name_matcher,
1939 self.unicode_name_matcher,
1941 back_latex_name_matcher,
1940 back_latex_name_matcher,
1942 back_unicode_name_matcher,
1941 back_unicode_name_matcher,
1943 self.fwd_unicode_matcher,
1942 self.fwd_unicode_matcher,
1944 ]
1943 ]
1945
1944
1946 if not self.backslash_combining_completions:
1945 if not self.backslash_combining_completions:
1947 for matcher in self._backslash_combining_matchers:
1946 for matcher in self._backslash_combining_matchers:
1948 self.disable_matchers.append(_get_matcher_id(matcher))
1947 self.disable_matchers.append(_get_matcher_id(matcher))
1949
1948
1950 if not self.merge_completions:
1949 if not self.merge_completions:
1951 self.suppress_competing_matchers = True
1950 self.suppress_competing_matchers = True
1952
1951
1953 @property
1952 @property
1954 def matchers(self) -> List[Matcher]:
1953 def matchers(self) -> List[Matcher]:
1955 """All active matcher routines for completion"""
1954 """All active matcher routines for completion"""
1956 if self.dict_keys_only:
1955 if self.dict_keys_only:
1957 return [self.dict_key_matcher]
1956 return [self.dict_key_matcher]
1958
1957
1959 if self.use_jedi:
1958 if self.use_jedi:
1960 return [
1959 return [
1961 *self.custom_matchers,
1960 *self.custom_matchers,
1962 *self._backslash_combining_matchers,
1961 *self._backslash_combining_matchers,
1963 *self.magic_arg_matchers,
1962 *self.magic_arg_matchers,
1964 self.custom_completer_matcher,
1963 self.custom_completer_matcher,
1965 self.magic_matcher,
1964 self.magic_matcher,
1966 self._jedi_matcher,
1965 self._jedi_matcher,
1967 self.dict_key_matcher,
1966 self.dict_key_matcher,
1968 self.file_matcher,
1967 self.file_matcher,
1969 ]
1968 ]
1970 else:
1969 else:
1971 return [
1970 return [
1972 *self.custom_matchers,
1971 *self.custom_matchers,
1973 *self._backslash_combining_matchers,
1972 *self._backslash_combining_matchers,
1974 *self.magic_arg_matchers,
1973 *self.magic_arg_matchers,
1975 self.custom_completer_matcher,
1974 self.custom_completer_matcher,
1976 self.dict_key_matcher,
1975 self.dict_key_matcher,
1977 self.magic_matcher,
1976 self.magic_matcher,
1978 self.python_matcher,
1977 self.python_matcher,
1979 self.file_matcher,
1978 self.file_matcher,
1980 self.python_func_kw_matcher,
1979 self.python_func_kw_matcher,
1981 ]
1980 ]
1982
1981
1983 def all_completions(self, text:str) -> List[str]:
1982 def all_completions(self, text:str) -> List[str]:
1984 """
1983 """
1985 Wrapper around the completion methods for the benefit of emacs.
1984 Wrapper around the completion methods for the benefit of emacs.
1986 """
1985 """
1987 prefix = text.rpartition('.')[0]
1986 prefix = text.rpartition('.')[0]
1988 with provisionalcompleter():
1987 with provisionalcompleter():
1989 return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
1988 return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
1990 for c in self.completions(text, len(text))]
1989 for c in self.completions(text, len(text))]
1991
1990
1992 return self.complete(text)[1]
1991 return self.complete(text)[1]
1993
1992
1994 def _clean_glob(self, text:str):
1993 def _clean_glob(self, text:str):
1995 return self.glob("%s*" % text)
1994 return self.glob("%s*" % text)
1996
1995
1997 def _clean_glob_win32(self, text:str):
1996 def _clean_glob_win32(self, text:str):
1998 return [f.replace("\\","/")
1997 return [f.replace("\\","/")
1999 for f in self.glob("%s*" % text)]
1998 for f in self.glob("%s*" % text)]
2000
1999
2001 @context_matcher()
2000 @context_matcher()
2002 def file_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2001 def file_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2003 """Same as :any:`file_matches`, but adopted to new Matcher API."""
2002 """Same as :any:`file_matches`, but adopted to new Matcher API."""
2004 matches = self.file_matches(context.token)
2003 matches = self.file_matches(context.token)
2005 # TODO: add a heuristic for suppressing (e.g. if it has OS-specific delimiter,
2004 # TODO: add a heuristic for suppressing (e.g. if it has OS-specific delimiter,
2006 # starts with `/home/`, `C:\`, etc)
2005 # starts with `/home/`, `C:\`, etc)
2007 return _convert_matcher_v1_result_to_v2(matches, type="path")
2006 return _convert_matcher_v1_result_to_v2(matches, type="path")
2008
2007
2009 def file_matches(self, text: str) -> List[str]:
2008 def file_matches(self, text: str) -> List[str]:
2010 """Match filenames, expanding ~USER type strings.
2009 """Match filenames, expanding ~USER type strings.
2011
2010
2012 Most of the seemingly convoluted logic in this completer is an
2011 Most of the seemingly convoluted logic in this completer is an
2013 attempt to handle filenames with spaces in them. And yet it's not
2012 attempt to handle filenames with spaces in them. And yet it's not
2014 quite perfect, because Python's readline doesn't expose all of the
2013 quite perfect, because Python's readline doesn't expose all of the
2015 GNU readline details needed for this to be done correctly.
2014 GNU readline details needed for this to be done correctly.
2016
2015
2017 For a filename with a space in it, the printed completions will be
2016 For a filename with a space in it, the printed completions will be
2018 only the parts after what's already been typed (instead of the
2017 only the parts after what's already been typed (instead of the
2019 full completions, as is normally done). I don't think with the
2018 full completions, as is normally done). I don't think with the
2020 current (as of Python 2.3) Python readline it's possible to do
2019 current (as of Python 2.3) Python readline it's possible to do
2021 better.
2020 better.
2022
2021
2023 .. deprecated:: 8.6
2022 .. deprecated:: 8.6
2024 You can use :meth:`file_matcher` instead.
2023 You can use :meth:`file_matcher` instead.
2025 """
2024 """
2026
2025
2027 # chars that require escaping with backslash - i.e. chars
2026 # chars that require escaping with backslash - i.e. chars
2028 # that readline treats incorrectly as delimiters, but we
2027 # that readline treats incorrectly as delimiters, but we
2029 # don't want to treat as delimiters in filename matching
2028 # don't want to treat as delimiters in filename matching
2030 # when escaped with backslash
2029 # when escaped with backslash
2031 if text.startswith('!'):
2030 if text.startswith('!'):
2032 text = text[1:]
2031 text = text[1:]
2033 text_prefix = u'!'
2032 text_prefix = u'!'
2034 else:
2033 else:
2035 text_prefix = u''
2034 text_prefix = u''
2036
2035
2037 text_until_cursor = self.text_until_cursor
2036 text_until_cursor = self.text_until_cursor
2038 # track strings with open quotes
2037 # track strings with open quotes
2039 open_quotes = has_open_quotes(text_until_cursor)
2038 open_quotes = has_open_quotes(text_until_cursor)
2040
2039
2041 if '(' in text_until_cursor or '[' in text_until_cursor:
2040 if '(' in text_until_cursor or '[' in text_until_cursor:
2042 lsplit = text
2041 lsplit = text
2043 else:
2042 else:
2044 try:
2043 try:
2045 # arg_split ~ shlex.split, but with unicode bugs fixed by us
2044 # arg_split ~ shlex.split, but with unicode bugs fixed by us
2046 lsplit = arg_split(text_until_cursor)[-1]
2045 lsplit = arg_split(text_until_cursor)[-1]
2047 except ValueError:
2046 except ValueError:
2048 # typically an unmatched ", or backslash without escaped char.
2047 # typically an unmatched ", or backslash without escaped char.
2049 if open_quotes:
2048 if open_quotes:
2050 lsplit = text_until_cursor.split(open_quotes)[-1]
2049 lsplit = text_until_cursor.split(open_quotes)[-1]
2051 else:
2050 else:
2052 return []
2051 return []
2053 except IndexError:
2052 except IndexError:
2054 # tab pressed on empty line
2053 # tab pressed on empty line
2055 lsplit = ""
2054 lsplit = ""
2056
2055
2057 if not open_quotes and lsplit != protect_filename(lsplit):
2056 if not open_quotes and lsplit != protect_filename(lsplit):
2058 # if protectables are found, do matching on the whole escaped name
2057 # if protectables are found, do matching on the whole escaped name
2059 has_protectables = True
2058 has_protectables = True
2060 text0,text = text,lsplit
2059 text0,text = text,lsplit
2061 else:
2060 else:
2062 has_protectables = False
2061 has_protectables = False
2063 text = os.path.expanduser(text)
2062 text = os.path.expanduser(text)
2064
2063
2065 if text == "":
2064 if text == "":
2066 return [text_prefix + protect_filename(f) for f in self.glob("*")]
2065 return [text_prefix + protect_filename(f) for f in self.glob("*")]
2067
2066
2068 # Compute the matches from the filesystem
2067 # Compute the matches from the filesystem
2069 if sys.platform == 'win32':
2068 if sys.platform == 'win32':
2070 m0 = self.clean_glob(text)
2069 m0 = self.clean_glob(text)
2071 else:
2070 else:
2072 m0 = self.clean_glob(text.replace('\\', ''))
2071 m0 = self.clean_glob(text.replace('\\', ''))
2073
2072
2074 if has_protectables:
2073 if has_protectables:
2075 # If we had protectables, we need to revert our changes to the
2074 # If we had protectables, we need to revert our changes to the
2076 # beginning of filename so that we don't double-write the part
2075 # beginning of filename so that we don't double-write the part
2077 # of the filename we have so far
2076 # of the filename we have so far
2078 len_lsplit = len(lsplit)
2077 len_lsplit = len(lsplit)
2079 matches = [text_prefix + text0 +
2078 matches = [text_prefix + text0 +
2080 protect_filename(f[len_lsplit:]) for f in m0]
2079 protect_filename(f[len_lsplit:]) for f in m0]
2081 else:
2080 else:
2082 if open_quotes:
2081 if open_quotes:
2083 # if we have a string with an open quote, we don't need to
2082 # if we have a string with an open quote, we don't need to
2084 # protect the names beyond the quote (and we _shouldn't_, as
2083 # protect the names beyond the quote (and we _shouldn't_, as
2085 # it would cause bugs when the filesystem call is made).
2084 # it would cause bugs when the filesystem call is made).
2086 matches = m0 if sys.platform == "win32" else\
2085 matches = m0 if sys.platform == "win32" else\
2087 [protect_filename(f, open_quotes) for f in m0]
2086 [protect_filename(f, open_quotes) for f in m0]
2088 else:
2087 else:
2089 matches = [text_prefix +
2088 matches = [text_prefix +
2090 protect_filename(f) for f in m0]
2089 protect_filename(f) for f in m0]
2091
2090
2092 # Mark directories in input list by appending '/' to their names.
2091 # Mark directories in input list by appending '/' to their names.
2093 return [x+'/' if os.path.isdir(x) else x for x in matches]
2092 return [x+'/' if os.path.isdir(x) else x for x in matches]
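# Illustrative sketch (results depend on the current working directory and on
# ``self.text_until_cursor``): with a ``docs`` directory and a ``doc.txt``
# file present, completing "doc" might yield, with directories marked by a
# trailing slash:
#
#     completer.file_matches("doc")   # -> ['docs/', 'doc.txt']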
2094
2093
2095 @context_matcher()
2094 @context_matcher()
2096 def magic_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2095 def magic_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2097 """Match magics."""
2096 """Match magics."""
2098 text = context.token
2097 text = context.token
2099 matches = self.magic_matches(text)
2098 matches = self.magic_matches(text)
2100 result = _convert_matcher_v1_result_to_v2(matches, type="magic")
2099 result = _convert_matcher_v1_result_to_v2(matches, type="magic")
2101 is_magic_prefix = len(text) > 0 and text[0] == "%"
2100 is_magic_prefix = len(text) > 0 and text[0] == "%"
2102 result["suppress"] = is_magic_prefix and bool(result["completions"])
2101 result["suppress"] = is_magic_prefix and bool(result["completions"])
2103 return result
2102 return result
2104
2103
2105 def magic_matches(self, text: str):
2104 def magic_matches(self, text: str):
2106 """Match magics.
2105 """Match magics.
2107
2106
2108 .. deprecated:: 8.6
2107 .. deprecated:: 8.6
2109 You can use :meth:`magic_matcher` instead.
2108 You can use :meth:`magic_matcher` instead.
2110 """
2109 """
2111 # Get all shell magics now rather than statically, so magics loaded at
2110 # Get all shell magics now rather than statically, so magics loaded at
2112 # runtime show up too.
2111 # runtime show up too.
2113 lsm = self.shell.magics_manager.lsmagic()
2112 lsm = self.shell.magics_manager.lsmagic()
2114 line_magics = lsm['line']
2113 line_magics = lsm['line']
2115 cell_magics = lsm['cell']
2114 cell_magics = lsm['cell']
2116 pre = self.magic_escape
2115 pre = self.magic_escape
2117 pre2 = pre+pre
2116 pre2 = pre+pre
2118
2117
2119 explicit_magic = text.startswith(pre)
2118 explicit_magic = text.startswith(pre)
2120
2119
2121 # Completion logic:
2120 # Completion logic:
2122 # - user gives %%: only do cell magics
2121 # - user gives %%: only do cell magics
2123 # - user gives %: do both line and cell magics
2122 # - user gives %: do both line and cell magics
2124 # - no prefix: do both
2123 # - no prefix: do both
2125 # In other words, line magics are skipped if the user gives %% explicitly
2124 # In other words, line magics are skipped if the user gives %% explicitly
2126 #
2125 #
2127 # We also exclude magics that match any currently visible names:
2126 # We also exclude magics that match any currently visible names:
2128 # https://github.com/ipython/ipython/issues/4877, unless the user has
2127 # https://github.com/ipython/ipython/issues/4877, unless the user has
2129 # typed a %:
2128 # typed a %:
2130 # https://github.com/ipython/ipython/issues/10754
2129 # https://github.com/ipython/ipython/issues/10754
2131 bare_text = text.lstrip(pre)
2130 bare_text = text.lstrip(pre)
2132 global_matches = self.global_matches(bare_text)
2131 global_matches = self.global_matches(bare_text)
2133 if not explicit_magic:
2132 if not explicit_magic:
2134 def matches(magic):
2133 def matches(magic):
2135 """
2134 """
2136 Filter magics, in particular remove magics that match
2135 Filter magics, in particular remove magics that match
2137 a name present in global namespace.
2136 a name present in global namespace.
2138 """
2137 """
2139 return ( magic.startswith(bare_text) and
2138 return ( magic.startswith(bare_text) and
2140 magic not in global_matches )
2139 magic not in global_matches )
2141 else:
2140 else:
2142 def matches(magic):
2141 def matches(magic):
2143 return magic.startswith(bare_text)
2142 return magic.startswith(bare_text)
2144
2143
2145 comp = [ pre2+m for m in cell_magics if matches(m)]
2144 comp = [ pre2+m for m in cell_magics if matches(m)]
2146 if not text.startswith(pre2):
2145 if not text.startswith(pre2):
2147 comp += [ pre+m for m in line_magics if matches(m)]
2146 comp += [ pre+m for m in line_magics if matches(m)]
2148
2147
2149 return comp
2148 return comp
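# Illustrative sketch (actual results depend on which magics are loaded):
#
#     completer.magic_matches("%ti")    # -> e.g. ['%%time', '%%timeit', '%time', '%timeit']
#     completer.magic_matches("%%ti")   # -> cell magics only, e.g. ['%%time', '%%timeit']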
2150
2149
2151 @context_matcher()
2150 @context_matcher()
2152 def magic_config_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2151 def magic_config_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2153 """Match class names and attributes for %config magic."""
2152 """Match class names and attributes for %config magic."""
2154 # NOTE: uses `line_buffer` equivalent for compatibility
2153 # NOTE: uses `line_buffer` equivalent for compatibility
2155 matches = self.magic_config_matches(context.line_with_cursor)
2154 matches = self.magic_config_matches(context.line_with_cursor)
2156 return _convert_matcher_v1_result_to_v2(matches, type="param")
2155 return _convert_matcher_v1_result_to_v2(matches, type="param")
2157
2156
2158 def magic_config_matches(self, text: str) -> List[str]:
2157 def magic_config_matches(self, text: str) -> List[str]:
2159 """Match class names and attributes for %config magic.
2158 """Match class names and attributes for %config magic.
2160
2159
2161 .. deprecated:: 8.6
2160 .. deprecated:: 8.6
2162 You can use :meth:`magic_config_matcher` instead.
2161 You can use :meth:`magic_config_matcher` instead.
2163 """
2162 """
2164 texts = text.strip().split()
2163 texts = text.strip().split()
2165
2164
2166 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
2165 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
2167 # get all configuration classes
2166 # get all configuration classes
2168 classes = sorted(set([ c for c in self.shell.configurables
2167 classes = sorted(set([ c for c in self.shell.configurables
2169 if c.__class__.class_traits(config=True)
2168 if c.__class__.class_traits(config=True)
2170 ]), key=lambda x: x.__class__.__name__)
2169 ]), key=lambda x: x.__class__.__name__)
2171 classnames = [ c.__class__.__name__ for c in classes ]
2170 classnames = [ c.__class__.__name__ for c in classes ]
2172
2171
2173 # return all classnames if config or %config is given
2172 # return all classnames if config or %config is given
2174 if len(texts) == 1:
2173 if len(texts) == 1:
2175 return classnames
2174 return classnames
2176
2175
2177 # match classname
2176 # match classname
2178 classname_texts = texts[1].split('.')
2177 classname_texts = texts[1].split('.')
2179 classname = classname_texts[0]
2178 classname = classname_texts[0]
2180 classname_matches = [ c for c in classnames
2179 classname_matches = [ c for c in classnames
2181 if c.startswith(classname) ]
2180 if c.startswith(classname) ]
2182
2181
2183 # return matched classes or the matched class with attributes
2182 # return matched classes or the matched class with attributes
2184 if texts[1].find('.') < 0:
2183 if texts[1].find('.') < 0:
2185 return classname_matches
2184 return classname_matches
2186 elif len(classname_matches) == 1 and \
2185 elif len(classname_matches) == 1 and \
2187 classname_matches[0] == classname:
2186 classname_matches[0] == classname:
2188 cls = classes[classnames.index(classname)].__class__
2187 cls = classes[classnames.index(classname)].__class__
2189 help = cls.class_get_help()
2188 help = cls.class_get_help()
2190 # strip leading '--' from cl-args:
2189 # strip leading '--' from cl-args:
2191 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
2190 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
2192 return [ attr.split('=')[0]
2191 return [ attr.split('=')[0]
2193 for attr in help.strip().splitlines()
2192 for attr in help.strip().splitlines()
2194 if attr.startswith(texts[1]) ]
2193 if attr.startswith(texts[1]) ]
2195 return []
2194 return []
2196
2195
2197 @context_matcher()
2196 @context_matcher()
2198 def magic_color_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2197 def magic_color_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2199 """Match color schemes for %colors magic."""
2198 """Match color schemes for %colors magic."""
2200 # NOTE: uses `line_buffer` equivalent for compatibility
2199 # NOTE: uses `line_buffer` equivalent for compatibility
2201 matches = self.magic_color_matches(context.line_with_cursor)
2200 matches = self.magic_color_matches(context.line_with_cursor)
2202 return _convert_matcher_v1_result_to_v2(matches, type="param")
2201 return _convert_matcher_v1_result_to_v2(matches, type="param")
2203
2202
2204 def magic_color_matches(self, text: str) -> List[str]:
2203 def magic_color_matches(self, text: str) -> List[str]:
2205 """Match color schemes for %colors magic.
2204 """Match color schemes for %colors magic.
2206
2205
2207 .. deprecated:: 8.6
2206 .. deprecated:: 8.6
2208 You can use :meth:`magic_color_matcher` instead.
2207 You can use :meth:`magic_color_matcher` instead.
2209 """
2208 """
2210 texts = text.split()
2209 texts = text.split()
2211 if text.endswith(' '):
2210 if text.endswith(' '):
2212 # .split() strips off the trailing whitespace. Add '' back
2211 # .split() strips off the trailing whitespace. Add '' back
2213 # so that: '%colors ' -> ['%colors', '']
2212 # so that: '%colors ' -> ['%colors', '']
2214 texts.append('')
2213 texts.append('')
2215
2214
2216 if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
2215 if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
2217 prefix = texts[1]
2216 prefix = texts[1]
2218 return [ color for color in InspectColors.keys()
2217 return [ color for color in InspectColors.keys()
2219 if color.startswith(prefix) ]
2218 if color.startswith(prefix) ]
2220 return []
2219 return []
2221
2220
2222 @context_matcher(identifier="IPCompleter.jedi_matcher")
2221 @context_matcher(identifier="IPCompleter.jedi_matcher")
2223 def _jedi_matcher(self, context: CompletionContext) -> _JediMatcherResult:
2222 def _jedi_matcher(self, context: CompletionContext) -> _JediMatcherResult:
2224 matches = self._jedi_matches(
2223 matches = self._jedi_matches(
2225 cursor_column=context.cursor_position,
2224 cursor_column=context.cursor_position,
2226 cursor_line=context.cursor_line,
2225 cursor_line=context.cursor_line,
2227 text=context.full_text,
2226 text=context.full_text,
2228 )
2227 )
2229 return {
2228 return {
2230 "completions": matches,
2229 "completions": matches,
2231 # static analysis should not suppress other matchers
2230 # static analysis should not suppress other matchers
2232 "suppress": False,
2231 "suppress": False,
2233 }
2232 }
2234
2233
2235 def _jedi_matches(
2234 def _jedi_matches(
2236 self, cursor_column: int, cursor_line: int, text: str
2235 self, cursor_column: int, cursor_line: int, text: str
2237 ) -> Iterator[_JediCompletionLike]:
2236 ) -> Iterator[_JediCompletionLike]:
2238 """
2237 """
2239 Return an iterator of :any:`jedi.api.Completion` objects from a ``text`` and
2238 Return an iterator of :any:`jedi.api.Completion` objects from a ``text`` and
2240 cursor position.
2239 cursor position.
2241
2240
2242 Parameters
2241 Parameters
2243 ----------
2242 ----------
2244 cursor_column : int
2243 cursor_column : int
2245 column position of the cursor in ``text``, 0-indexed.
2244 column position of the cursor in ``text``, 0-indexed.
2246 cursor_line : int
2245 cursor_line : int
2247 line position of the cursor in ``text``, 0-indexed
2246 line position of the cursor in ``text``, 0-indexed
2248 text : str
2247 text : str
2249 text to complete
2248 text to complete
2250
2249
2251 Notes
2250 Notes
2252 -----
2251 -----
2253 If ``IPCompleter.debug`` is ``True`` may return a :any:`_FakeJediCompletion`
2252 If ``IPCompleter.debug`` is ``True`` may return a :any:`_FakeJediCompletion`
2254 object containing a string with the Jedi debug information attached.
2253 object containing a string with the Jedi debug information attached.
2255
2254
2256 .. deprecated:: 8.6
2255 .. deprecated:: 8.6
2257 You can use :meth:`_jedi_matcher` instead.
2256 You can use :meth:`_jedi_matcher` instead.
2258 """
2257 """
2259 namespaces = [self.namespace]
2258 namespaces = [self.namespace]
2260 if self.global_namespace is not None:
2259 if self.global_namespace is not None:
2261 namespaces.append(self.global_namespace)
2260 namespaces.append(self.global_namespace)
2262
2261
2263 completion_filter = lambda x:x
2262 completion_filter = lambda x:x
2264 offset = cursor_to_position(text, cursor_line, cursor_column)
2263 offset = cursor_to_position(text, cursor_line, cursor_column)
2265 # filter output if we are completing for object members
2264 # filter output if we are completing for object members
2266 if offset:
2265 if offset:
2267 pre = text[offset-1]
2266 pre = text[offset-1]
2268 if pre == '.':
2267 if pre == '.':
2269 if self.omit__names == 2:
2268 if self.omit__names == 2:
2270 completion_filter = lambda c:not c.name.startswith('_')
2269 completion_filter = lambda c:not c.name.startswith('_')
2271 elif self.omit__names == 1:
2270 elif self.omit__names == 1:
2272 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
2271 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
2273 elif self.omit__names == 0:
2272 elif self.omit__names == 0:
2274 completion_filter = lambda x:x
2273 completion_filter = lambda x:x
2275 else:
2274 else:
2276 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
2275 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
2277
2276
2278 interpreter = jedi.Interpreter(text[:offset], namespaces)
2277 interpreter = jedi.Interpreter(text[:offset], namespaces)
2279 try_jedi = True
2278 try_jedi = True
2280
2279
2281 try:
2280 try:
2282 # find the first token in the current tree -- if it is a ' or " then we are in a string
2281 # find the first token in the current tree -- if it is a ' or " then we are in a string
2283 completing_string = False
2282 completing_string = False
2284 try:
2283 try:
2285 first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
2284 first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
2286 except StopIteration:
2285 except StopIteration:
2287 pass
2286 pass
2288 else:
2287 else:
2289 # note the value may be ', ", or it may also be ''' or """, or
2288 # note the value may be ', ", or it may also be ''' or """, or
2290 # in some cases, """what/you/typed..., but all of these are
2289 # in some cases, """what/you/typed..., but all of these are
2291 # strings.
2290 # strings.
2292 completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
2291 completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
2293
2292
2294 # if we are in a string jedi is likely not the right candidate for
2293 # if we are in a string jedi is likely not the right candidate for
2295 # now. Skip it.
2294 # now. Skip it.
2296 try_jedi = not completing_string
2295 try_jedi = not completing_string
2297 except Exception as e:
2296 except Exception as e:
2298 # many things can go wrong since we are using a private API; just don't crash.
2297 # many things can go wrong since we are using a private API; just don't crash.
2299 if self.debug:
2298 if self.debug:
2300 print("Error detecting if completing a non-finished string :", e, '|')
2299 print("Error detecting if completing a non-finished string :", e, '|')
2301
2300
2302 if not try_jedi:
2301 if not try_jedi:
2303 return iter([])
2302 return iter([])
2304 try:
2303 try:
2305 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
2304 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
2306 except Exception as e:
2305 except Exception as e:
2307 if self.debug:
2306 if self.debug:
2308 return iter(
2307 return iter(
2309 [
2308 [
2310 _FakeJediCompletion(
2309 _FakeJediCompletion(
2311 'Oops, Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""'
2310 'Oops, Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""'
2312 % (e)
2311 % (e)
2313 )
2312 )
2314 ]
2313 ]
2315 )
2314 )
2316 else:
2315 else:
2317 return iter([])
2316 return iter([])
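# Hedged usage sketch (requires jedi; results depend on the environment).
# ``cursor_line`` is 0-based and ``cursor_column`` counts characters on that
# line, so the call below completes at the end of "import co":
#
#     list(completer._jedi_matches(cursor_column=9, cursor_line=0, text="import co"))
#     # -> jedi completions such as 'collections', 'copy', ...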
2318
2317
2319 @context_matcher()
2318 @context_matcher()
2320 def python_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2319 def python_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2321 """Match attributes or global python names"""
2320 """Match attributes or global python names"""
2322 text = context.line_with_cursor
2321 text = context.line_with_cursor
2323 if "." in text:
2322 if "." in text:
2324 try:
2323 try:
2325 matches, fragment = self._attr_matches(text, include_prefix=False)
2324 matches, fragment = self._attr_matches(text, include_prefix=False)
2326 if text.endswith(".") and self.omit__names:
2325 if text.endswith(".") and self.omit__names:
2327 if self.omit__names == 1:
2326 if self.omit__names == 1:
2328 # true if txt is _not_ a __ name, false otherwise:
2327 # true if txt is _not_ a __ name, false otherwise:
2329 no__name = lambda txt: re.match(r".*\.__.*?__", txt) is None
2328 no__name = lambda txt: re.match(r".*\.__.*?__", txt) is None
2330 else:
2329 else:
2331 # true if txt is _not_ a _ name, false otherwise:
2330 # true if txt is _not_ a _ name, false otherwise:
2332 no__name = (
2331 no__name = (
2333 lambda txt: re.match(r"\._.*?", txt[txt.rindex(".") :])
2332 lambda txt: re.match(r"\._.*?", txt[txt.rindex(".") :])
2334 is None
2333 is None
2335 )
2334 )
2336 matches = filter(no__name, matches)
2335 matches = filter(no__name, matches)
2337 return _convert_matcher_v1_result_to_v2(
2336 return _convert_matcher_v1_result_to_v2(
2338 matches, type="attribute", fragment=fragment
2337 matches, type="attribute", fragment=fragment
2339 )
2338 )
2340 except NameError:
2339 except NameError:
2341 # catches <undefined attributes>.<tab>
2340 # catches <undefined attributes>.<tab>
2342 matches = []
2341 matches = []
2343 return _convert_matcher_v1_result_to_v2(matches, type="attribute")
2342 return _convert_matcher_v1_result_to_v2(matches, type="attribute")
2344 else:
2343 else:
2345 matches = self.global_matches(context.token)
2344 matches = self.global_matches(context.token)
2346 # TODO: maybe distinguish between functions, modules and just "variables"
2345 # TODO: maybe distinguish between functions, modules and just "variables"
2347 return _convert_matcher_v1_result_to_v2(matches, type="variable")
2346 return _convert_matcher_v1_result_to_v2(matches, type="variable")
2348
2347
2349 @completion_matcher(api_version=1)
2348 @completion_matcher(api_version=1)
2350 def python_matches(self, text: str) -> Iterable[str]:
2349 def python_matches(self, text: str) -> Iterable[str]:
2351 """Match attributes or global python names.
2350 """Match attributes or global python names.
2352
2351
2353 .. deprecated:: 8.27
2352 .. deprecated:: 8.27
2354 You can use :meth:`python_matcher` instead."""
2353 You can use :meth:`python_matcher` instead."""
2355 if "." in text:
2354 if "." in text:
2356 try:
2355 try:
2357 matches = self.attr_matches(text)
2356 matches = self.attr_matches(text)
2358 if text.endswith('.') and self.omit__names:
2357 if text.endswith('.') and self.omit__names:
2359 if self.omit__names == 1:
2358 if self.omit__names == 1:
2360 # true if txt is _not_ a __ name, false otherwise:
2359 # true if txt is _not_ a __ name, false otherwise:
2361 no__name = (lambda txt:
2360 no__name = (lambda txt:
2362 re.match(r'.*\.__.*?__',txt) is None)
2361 re.match(r'.*\.__.*?__',txt) is None)
2363 else:
2362 else:
2364 # true if txt is _not_ a _ name, false otherwise:
2363 # true if txt is _not_ a _ name, false otherwise:
2365 no__name = (lambda txt:
2364 no__name = (lambda txt:
2366 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
2365 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
2367 matches = filter(no__name, matches)
2366 matches = filter(no__name, matches)
2368 except NameError:
2367 except NameError:
2369 # catches <undefined attributes>.<tab>
2368 # catches <undefined attributes>.<tab>
2370 matches = []
2369 matches = []
2371 else:
2370 else:
2372 matches = self.global_matches(text)
2371 matches = self.global_matches(text)
2373 return matches
2372 return matches
2374
2373
2375 def _default_arguments_from_docstring(self, doc):
2374 def _default_arguments_from_docstring(self, doc):
2376 """Parse the first line of docstring for call signature.
2375 """Parse the first line of docstring for call signature.
2377
2376
2378 Docstring should be of the form 'min(iterable[, key=func])\n'.
2377 Docstring should be of the form 'min(iterable[, key=func])\n'.
2379 It can also parse cython docstring of the form
2378 It can also parse cython docstring of the form
2380 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
2379 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
2381 """
2380 """
2382 if doc is None:
2381 if doc is None:
2383 return []
2382 return []
2384
2383
2385 # only the first line of the docstring matters
2384 # only the first line of the docstring matters
2386 line = doc.lstrip().splitlines()[0]
2385 line = doc.lstrip().splitlines()[0]
2387
2386
2388 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
2387 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
2389 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
2388 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
2390 sig = self.docstring_sig_re.search(line)
2389 sig = self.docstring_sig_re.search(line)
2391 if sig is None:
2390 if sig is None:
2392 return []
2391 return []
2393 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
2392 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
2394 sig = sig.groups()[0].split(',')
2393 sig = sig.groups()[0].split(',')
2395 ret = []
2394 ret = []
2396 for s in sig:
2395 for s in sig:
2397 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
2396 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
2398 ret += self.docstring_kwd_re.findall(s)
2397 ret += self.docstring_kwd_re.findall(s)
2399 return ret
2398 return ret
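# Illustrative sketch using the docstring's own examples:
#
#     completer._default_arguments_from_docstring('min(iterable[, key=func])\n')
#     # -> ['key']
#     completer._default_arguments_from_docstring(
#         'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)')
#     # -> ['ncall', 'resume', 'nsplit']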
2400
2399
2401 def _default_arguments(self, obj):
2400 def _default_arguments(self, obj):
2402 """Return the list of default arguments of obj if it is callable,
2401 """Return the list of default arguments of obj if it is callable,
2403 or empty list otherwise."""
2402 or empty list otherwise."""
2404 call_obj = obj
2403 call_obj = obj
2405 ret = []
2404 ret = []
2406 if inspect.isbuiltin(obj):
2405 if inspect.isbuiltin(obj):
2407 pass
2406 pass
2408 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
2407 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
2409 if inspect.isclass(obj):
2408 if inspect.isclass(obj):
2410 #for cython embedsignature=True the constructor docstring
2409 #for cython embedsignature=True the constructor docstring
2411 #belongs to the object itself not __init__
2410 #belongs to the object itself not __init__
2412 ret += self._default_arguments_from_docstring(
2411 ret += self._default_arguments_from_docstring(
2413 getattr(obj, '__doc__', ''))
2412 getattr(obj, '__doc__', ''))
2414 # for classes, check for __init__,__new__
2413 # for classes, check for __init__,__new__
2415 call_obj = (getattr(obj, '__init__', None) or
2414 call_obj = (getattr(obj, '__init__', None) or
2416 getattr(obj, '__new__', None))
2415 getattr(obj, '__new__', None))
2417 # for all others, check if they are __call__able
2416 # for all others, check if they are __call__able
2418 elif hasattr(obj, '__call__'):
2417 elif hasattr(obj, '__call__'):
2419 call_obj = obj.__call__
2418 call_obj = obj.__call__
2420 ret += self._default_arguments_from_docstring(
2419 ret += self._default_arguments_from_docstring(
2421 getattr(call_obj, '__doc__', ''))
2420 getattr(call_obj, '__doc__', ''))
2422
2421
2423 _keeps = (inspect.Parameter.KEYWORD_ONLY,
2422 _keeps = (inspect.Parameter.KEYWORD_ONLY,
2424 inspect.Parameter.POSITIONAL_OR_KEYWORD)
2423 inspect.Parameter.POSITIONAL_OR_KEYWORD)
2425
2424
2426 try:
2425 try:
2427 sig = inspect.signature(obj)
2426 sig = inspect.signature(obj)
2428 ret.extend(k for k, v in sig.parameters.items() if
2427 ret.extend(k for k, v in sig.parameters.items() if
2429 v.kind in _keeps)
2428 v.kind in _keeps)
2430 except ValueError:
2429 except ValueError:
2431 pass
2430 pass
2432
2431
2433 return list(set(ret))
2432 return list(set(ret))
2434
2433
2435 @context_matcher()
2434 @context_matcher()
2436 def python_func_kw_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2435 def python_func_kw_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2437 """Match named parameters (kwargs) of the last open function."""
2436 """Match named parameters (kwargs) of the last open function."""
2438 matches = self.python_func_kw_matches(context.token)
2437 matches = self.python_func_kw_matches(context.token)
2439 return _convert_matcher_v1_result_to_v2(matches, type="param")
2438 return _convert_matcher_v1_result_to_v2(matches, type="param")
2440
2439
2441 def python_func_kw_matches(self, text):
2440 def python_func_kw_matches(self, text):
2442 """Match named parameters (kwargs) of the last open function.
2441 """Match named parameters (kwargs) of the last open function.
2443
2442
2444 .. deprecated:: 8.6
2443 .. deprecated:: 8.6
2445 You can use :meth:`python_func_kw_matcher` instead.
2444 You can use :meth:`python_func_kw_matcher` instead.
2446 """
2445 """
2447
2446
2448 if "." in text: # a parameter cannot be dotted
2447 if "." in text: # a parameter cannot be dotted
2449 return []
2448 return []
2450 try: regexp = self.__funcParamsRegex
2449 try: regexp = self.__funcParamsRegex
2451 except AttributeError:
2450 except AttributeError:
2452 regexp = self.__funcParamsRegex = re.compile(r'''
2451 regexp = self.__funcParamsRegex = re.compile(r'''
2453 '.*?(?<!\\)' | # single quoted strings or
2452 '.*?(?<!\\)' | # single quoted strings or
2454 ".*?(?<!\\)" | # double quoted strings or
2453 ".*?(?<!\\)" | # double quoted strings or
2455 \w+ | # identifier
2454 \w+ | # identifier
2456 \S # other characters
2455 \S # other characters
2457 ''', re.VERBOSE | re.DOTALL)
2456 ''', re.VERBOSE | re.DOTALL)
2458 # 1. find the nearest identifier that comes before an unclosed
2457 # 1. find the nearest identifier that comes before an unclosed
2459 # parenthesis before the cursor
2458 # parenthesis before the cursor
2460 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
2459 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
2461 tokens = regexp.findall(self.text_until_cursor)
2460 tokens = regexp.findall(self.text_until_cursor)
2462 iterTokens = reversed(tokens); openPar = 0
2461 iterTokens = reversed(tokens); openPar = 0
2463
2462
2464 for token in iterTokens:
2463 for token in iterTokens:
2465 if token == ')':
2464 if token == ')':
2466 openPar -= 1
2465 openPar -= 1
2467 elif token == '(':
2466 elif token == '(':
2468 openPar += 1
2467 openPar += 1
2469 if openPar > 0:
2468 if openPar > 0:
2470 # found the last unclosed parenthesis
2469 # found the last unclosed parenthesis
2471 break
2470 break
2472 else:
2471 else:
2473 return []
2472 return []
2474 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
2473 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
2475 ids = []
2474 ids = []
2476 isId = re.compile(r'\w+$').match
2475 isId = re.compile(r'\w+$').match
2477
2476
2478 while True:
2477 while True:
2479 try:
2478 try:
2480 ids.append(next(iterTokens))
2479 ids.append(next(iterTokens))
2481 if not isId(ids[-1]):
2480 if not isId(ids[-1]):
2482 ids.pop(); break
2481 ids.pop(); break
2483 if not next(iterTokens) == '.':
2482 if not next(iterTokens) == '.':
2484 break
2483 break
2485 except StopIteration:
2484 except StopIteration:
2486 break
2485 break
2487
2486
2488 # Find all named arguments already assigned to, so as to avoid suggesting
2487 # Find all named arguments already assigned to, so as to avoid suggesting
2489 # them again
2488 # them again
2490 usedNamedArgs = set()
2489 usedNamedArgs = set()
2491 par_level = -1
2490 par_level = -1
2492 for token, next_token in zip(tokens, tokens[1:]):
2491 for token, next_token in zip(tokens, tokens[1:]):
2493 if token == '(':
2492 if token == '(':
2494 par_level += 1
2493 par_level += 1
2495 elif token == ')':
2494 elif token == ')':
2496 par_level -= 1
2495 par_level -= 1
2497
2496
2498 if par_level != 0:
2497 if par_level != 0:
2499 continue
2498 continue
2500
2499
2501 if next_token != '=':
2500 if next_token != '=':
2502 continue
2501 continue
2503
2502
2504 usedNamedArgs.add(token)
2503 usedNamedArgs.add(token)
2505
2504
2506 argMatches = []
2505 argMatches = []
2507 try:
2506 try:
2508 callableObj = '.'.join(ids[::-1])
2507 callableObj = '.'.join(ids[::-1])
2509 namedArgs = self._default_arguments(eval(callableObj,
2508 namedArgs = self._default_arguments(eval(callableObj,
2510 self.namespace))
2509 self.namespace))
2511
2510
2512 # Remove used named arguments from the list, no need to show twice
2511 # Remove used named arguments from the list, no need to show twice
2513 for namedArg in set(namedArgs) - usedNamedArgs:
2512 for namedArg in set(namedArgs) - usedNamedArgs:
2514 if namedArg.startswith(text):
2513 if namedArg.startswith(text):
2515 argMatches.append("%s=" %namedArg)
2514 argMatches.append("%s=" %namedArg)
2516 except:
2515 except:
2517 pass
2516 pass
2518
2517
2519 return argMatches
2518 return argMatches
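# Illustrative sketch (the helper names `keyword_candidates` and `demo` below
# are made up for this note, not part of IPython). The kwarg matching above
# ultimately reduces to inspect.signature plus the `_keeps` parameter-kind
# filter shown earlier; the same idea in isolation:
import inspect

def keyword_candidates(func):
    """Return parameter names that could be completed as ``name=``."""
    keeps = (inspect.Parameter.KEYWORD_ONLY,
             inspect.Parameter.POSITIONAL_OR_KEYWORD)
    try:
        sig = inspect.signature(func)
    except ValueError:  # some builtins expose no retrievable signature
        return []
    return [name for name, p in sig.parameters.items() if p.kind in keeps]

def demo(a, b=1, *, flag=False):
    pass

assert keyword_candidates(demo) == ["a", "b", "flag"]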
2520
2519
2521 @staticmethod
2520 @staticmethod
2522 def _get_keys(obj: Any) -> List[Any]:
2521 def _get_keys(obj: Any) -> List[Any]:
2523 # Objects can define their own completions by defining an
2522 # Objects can define their own completions by defining an
2524 # _ipython_key_completions_() method.
2523 # _ipython_key_completions_() method.
2525 method = get_real_method(obj, '_ipython_key_completions_')
2524 method = get_real_method(obj, '_ipython_key_completions_')
2526 if method is not None:
2525 if method is not None:
2527 return method()
2526 return method()
2528
2527
2529 # Special case some common in-memory dict-like types
2528 # Special case some common in-memory dict-like types
2530 if isinstance(obj, dict) or _safe_isinstance(obj, "pandas", "DataFrame"):
2529 if isinstance(obj, dict) or _safe_isinstance(obj, "pandas", "DataFrame"):
2531 try:
2530 try:
2532 return list(obj.keys())
2531 return list(obj.keys())
2533 except Exception:
2532 except Exception:
2534 return []
2533 return []
2535 elif _safe_isinstance(obj, "pandas", "core", "indexing", "_LocIndexer"):
2534 elif _safe_isinstance(obj, "pandas", "core", "indexing", "_LocIndexer"):
2536 try:
2535 try:
2537 return list(obj.obj.keys())
2536 return list(obj.obj.keys())
2538 except Exception:
2537 except Exception:
2539 return []
2538 return []
2540 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
2539 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
2541 _safe_isinstance(obj, 'numpy', 'void'):
2540 _safe_isinstance(obj, 'numpy', 'void'):
2542 return obj.dtype.names or []
2541 return obj.dtype.names or []
2543 return []
2542 return []
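# Illustrative sketch of the `_ipython_key_completions_` protocol honoured by
# `_get_keys` above; any container can opt in by defining that method (the
# `Config` class here is a made-up example, not from IPython):
class Config:
    def __init__(self):
        self._data = {"host": "localhost", "port": 8080}

    def __getitem__(self, key):
        return self._data[key]

    def _ipython_key_completions_(self):
        # keys offered when the user types ``cfg["<TAB>``
        return list(self._data)

assert Config()._ipython_key_completions_() == ["host", "port"]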
2544
2543
2545 @context_matcher()
2544 @context_matcher()
2546 def dict_key_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2545 def dict_key_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2547 """Match string keys in a dictionary, after e.g. ``foo[``."""
2546 """Match string keys in a dictionary, after e.g. ``foo[``."""
2548 matches = self.dict_key_matches(context.token)
2547 matches = self.dict_key_matches(context.token)
2549 return _convert_matcher_v1_result_to_v2(
2548 return _convert_matcher_v1_result_to_v2(
2550 matches, type="dict key", suppress_if_matches=True
2549 matches, type="dict key", suppress_if_matches=True
2551 )
2550 )
2552
2551
2553 def dict_key_matches(self, text: str) -> List[str]:
2552 def dict_key_matches(self, text: str) -> List[str]:
2554 """Match string keys in a dictionary, after e.g. ``foo[``.
2553 """Match string keys in a dictionary, after e.g. ``foo[``.
2555
2554
2556 .. deprecated:: 8.6
2555 .. deprecated:: 8.6
2557 You can use :meth:`dict_key_matcher` instead.
2556 You can use :meth:`dict_key_matcher` instead.
2558 """
2557 """
2559
2558
2560 # Short-circuit on closed dictionary (regular expression would
2559 # Short-circuit on closed dictionary (regular expression would
2561 # not match anyway, but would take quite a while).
2560 # not match anyway, but would take quite a while).
2562 if self.text_until_cursor.strip().endswith("]"):
2561 if self.text_until_cursor.strip().endswith("]"):
2563 return []
2562 return []
2564
2563
2565 match = DICT_MATCHER_REGEX.search(self.text_until_cursor)
2564 match = DICT_MATCHER_REGEX.search(self.text_until_cursor)
2566
2565
2567 if match is None:
2566 if match is None:
2568 return []
2567 return []
2569
2568
2570 expr, prior_tuple_keys, key_prefix = match.groups()
2569 expr, prior_tuple_keys, key_prefix = match.groups()
2571
2570
2572 obj = self._evaluate_expr(expr)
2571 obj = self._evaluate_expr(expr)
2573
2572
2574 if obj is not_found:
2573 if obj is not_found:
2575 return []
2574 return []
2576
2575
2577 keys = self._get_keys(obj)
2576 keys = self._get_keys(obj)
2578 if not keys:
2577 if not keys:
2579 return keys
2578 return keys
2580
2579
2581 tuple_prefix = guarded_eval(
2580 tuple_prefix = guarded_eval(
2582 prior_tuple_keys,
2581 prior_tuple_keys,
2583 EvaluationContext(
2582 EvaluationContext(
2584 globals=self.global_namespace,
2583 globals=self.global_namespace,
2585 locals=self.namespace,
2584 locals=self.namespace,
2586 evaluation=self.evaluation, # type: ignore
2585 evaluation=self.evaluation, # type: ignore
2587 in_subscript=True,
2586 in_subscript=True,
2588 ),
2587 ),
2589 )
2588 )
2590
2589
2591 closing_quote, token_offset, matches = match_dict_keys(
2590 closing_quote, token_offset, matches = match_dict_keys(
2592 keys, key_prefix, self.splitter.delims, extra_prefix=tuple_prefix
2591 keys, key_prefix, self.splitter.delims, extra_prefix=tuple_prefix
2593 )
2592 )
2594 if not matches:
2593 if not matches:
2595 return []
2594 return []
2596
2595
2597 # get the cursor position of
2596 # get the cursor position of
2598 # - the text being completed
2597 # - the text being completed
2599 # - the start of the key text
2598 # - the start of the key text
2600 # - the start of the completion
2599 # - the start of the completion
2601 text_start = len(self.text_until_cursor) - len(text)
2600 text_start = len(self.text_until_cursor) - len(text)
2602 if key_prefix:
2601 if key_prefix:
2603 key_start = match.start(3)
2602 key_start = match.start(3)
2604 completion_start = key_start + token_offset
2603 completion_start = key_start + token_offset
2605 else:
2604 else:
2606 key_start = completion_start = match.end()
2605 key_start = completion_start = match.end()
2607
2606
2608 # grab the leading prefix, to make sure all completions start with `text`
2607 # grab the leading prefix, to make sure all completions start with `text`
2609 if text_start > key_start:
2608 if text_start > key_start:
2610 leading = ''
2609 leading = ''
2611 else:
2610 else:
2612 leading = text[text_start:completion_start]
2611 leading = text[text_start:completion_start]
2613
2612
2614 # append closing quote and bracket as appropriate
2613 # append closing quote and bracket as appropriate
2615 # this is *not* appropriate if the opening quote or bracket is outside
2614 # this is *not* appropriate if the opening quote or bracket is outside
2616 # the text given to this method, e.g. `d["""a\nt
2615 # the text given to this method, e.g. `d["""a\nt
2617 can_close_quote = False
2616 can_close_quote = False
2618 can_close_bracket = False
2617 can_close_bracket = False
2619
2618
2620 continuation = self.line_buffer[len(self.text_until_cursor) :].strip()
2619 continuation = self.line_buffer[len(self.text_until_cursor) :].strip()
2621
2620
2622 if continuation.startswith(closing_quote):
2621 if continuation.startswith(closing_quote):
2623 # do not close if already closed, e.g. `d['a<tab>'`
2622 # do not close if already closed, e.g. `d['a<tab>'`
2624 continuation = continuation[len(closing_quote) :]
2623 continuation = continuation[len(closing_quote) :]
2625 else:
2624 else:
2626 can_close_quote = True
2625 can_close_quote = True
2627
2626
2628 continuation = continuation.strip()
2627 continuation = continuation.strip()
2629
2628
2630 # e.g. `pandas.DataFrame` has different tuple indexer behaviour,
2629 # e.g. `pandas.DataFrame` has different tuple indexer behaviour,
2631 # handling it is out of scope, so let's avoid appending suffixes.
2630 # handling it is out of scope, so let's avoid appending suffixes.
2632 has_known_tuple_handling = isinstance(obj, dict)
2631 has_known_tuple_handling = isinstance(obj, dict)
2633
2632
2634 can_close_bracket = (
2633 can_close_bracket = (
2635 not continuation.startswith("]") and self.auto_close_dict_keys
2634 not continuation.startswith("]") and self.auto_close_dict_keys
2636 )
2635 )
2637 can_close_tuple_item = (
2636 can_close_tuple_item = (
2638 not continuation.startswith(",")
2637 not continuation.startswith(",")
2639 and has_known_tuple_handling
2638 and has_known_tuple_handling
2640 and self.auto_close_dict_keys
2639 and self.auto_close_dict_keys
2641 )
2640 )
2642 can_close_quote = can_close_quote and self.auto_close_dict_keys
2641 can_close_quote = can_close_quote and self.auto_close_dict_keys
2643
2642
2644 # fast path if closing quote should be appended but no suffix is allowed
2643 # fast path if closing quote should be appended but no suffix is allowed
2645 if not can_close_quote and not can_close_bracket and closing_quote:
2644 if not can_close_quote and not can_close_bracket and closing_quote:
2646 return [leading + k for k in matches]
2645 return [leading + k for k in matches]
2647
2646
2648 results = []
2647 results = []
2649
2648
2650 end_of_tuple_or_item = _DictKeyState.END_OF_TUPLE | _DictKeyState.END_OF_ITEM
2649 end_of_tuple_or_item = _DictKeyState.END_OF_TUPLE | _DictKeyState.END_OF_ITEM
2651
2650
2652 for k, state_flag in matches.items():
2651 for k, state_flag in matches.items():
2653 result = leading + k
2652 result = leading + k
2654 if can_close_quote and closing_quote:
2653 if can_close_quote and closing_quote:
2655 result += closing_quote
2654 result += closing_quote
2656
2655
2657 if state_flag == end_of_tuple_or_item:
2656 if state_flag == end_of_tuple_or_item:
2658 # We do not know which suffix to add,
2657 # We do not know which suffix to add,
2659 # e.g. both tuple item and string
2658 # e.g. both tuple item and string
2660 # match this item.
2659 # match this item.
2661 pass
2660 pass
2662
2661
2663 if state_flag in end_of_tuple_or_item and can_close_bracket:
2662 if state_flag in end_of_tuple_or_item and can_close_bracket:
2664 result += "]"
2663 result += "]"
2665 if state_flag == _DictKeyState.IN_TUPLE and can_close_tuple_item:
2664 if state_flag == _DictKeyState.IN_TUPLE and can_close_tuple_item:
2666 result += ", "
2665 result += ", "
2667 results.append(result)
2666 results.append(result)
2668 return results
2667 return results
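# Illustrative sketch, much simplified: `dict_key_matches` delegates the actual
# filtering to `match_dict_keys`, which also handles byte keys, tuple keys and
# delimiters. The core idea of matching string keys against a possibly quoted
# prefix looks roughly like this (`simple_key_matches` is a made-up helper):
def simple_key_matches(keys, typed):
    quote = typed[:1] if typed[:1] in "'\"" else ""
    prefix = typed[len(quote):]
    return [quote + k for k in keys
            if isinstance(k, str) and k.startswith(prefix)]

d = {"alpha": 1, "alps": 2, "beta": 3}
assert simple_key_matches(d.keys(), "'al") == ["'alpha", "'alps"]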
2669
2668
2670 @context_matcher()
2669 @context_matcher()
2671 def unicode_name_matcher(self, context: CompletionContext):
2670 def unicode_name_matcher(self, context: CompletionContext):
2672 """Same as :any:`unicode_name_matches`, but adopted to new Matcher API."""
2671 """Same as :any:`unicode_name_matches`, but adopted to new Matcher API."""
2673 fragment, matches = self.unicode_name_matches(context.text_until_cursor)
2672 fragment, matches = self.unicode_name_matches(context.text_until_cursor)
2674 return _convert_matcher_v1_result_to_v2(
2673 return _convert_matcher_v1_result_to_v2(
2675 matches, type="unicode", fragment=fragment, suppress_if_matches=True
2674 matches, type="unicode", fragment=fragment, suppress_if_matches=True
2676 )
2675 )
2677
2676
2678 @staticmethod
2677 @staticmethod
2679 def unicode_name_matches(text: str) -> Tuple[str, List[str]]:
2678 def unicode_name_matches(text: str) -> Tuple[str, List[str]]:
2680 """Match Latex-like syntax for unicode characters base
2679 """Match Latex-like syntax for unicode characters base
2681 on the name of the character.
2680 on the name of the character.
2682
2681
2683 This does ``\\GREEK SMALL LETTER ETA`` -> ``Ξ·``
2682 This does ``\\GREEK SMALL LETTER ETA`` -> ``Ξ·``
2684
2683
2685 Works only on valid Python 3 identifiers, or on combining characters that
2684 Works only on valid Python 3 identifiers, or on combining characters that
2686 will combine to form a valid identifier.
2685 will combine to form a valid identifier.
2687 """
2686 """
2688 slashpos = text.rfind('\\')
2687 slashpos = text.rfind('\\')
2689 if slashpos > -1:
2688 if slashpos > -1:
2690 s = text[slashpos+1:]
2689 s = text[slashpos+1:]
2691 try:
2690 try:
2692 unic = unicodedata.lookup(s)
2691 unic = unicodedata.lookup(s)
2693 # allow combining chars
2692 # allow combining chars
2694 if ('a'+unic).isidentifier():
2693 if ('a'+unic).isidentifier():
2695 return '\\'+s,[unic]
2694 return '\\'+s,[unic]
2696 except KeyError:
2695 except KeyError:
2697 pass
2696 pass
2698 return '', []
2697 return '', []
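# Illustrative sketch: the lookup above is a thin wrapper over
# `unicodedata.lookup` plus the "does it still form an identifier?" check.
import unicodedata

char = unicodedata.lookup("GREEK SMALL LETTER ETA")
assert char == "\N{GREEK SMALL LETTER ETA}"
assert ("a" + char).isidentifier()  # the combining-friendly identifier test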
2699
2698
2700 @context_matcher()
2699 @context_matcher()
2701 def latex_name_matcher(self, context: CompletionContext):
2700 def latex_name_matcher(self, context: CompletionContext):
2702 """Match Latex syntax for unicode characters.
2701 """Match Latex syntax for unicode characters.
2703
2702
2704 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
2703 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
2705 """
2704 """
2706 fragment, matches = self.latex_matches(context.text_until_cursor)
2705 fragment, matches = self.latex_matches(context.text_until_cursor)
2707 return _convert_matcher_v1_result_to_v2(
2706 return _convert_matcher_v1_result_to_v2(
2708 matches, type="latex", fragment=fragment, suppress_if_matches=True
2707 matches, type="latex", fragment=fragment, suppress_if_matches=True
2709 )
2708 )
2710
2709
2711 def latex_matches(self, text: str) -> Tuple[str, Sequence[str]]:
2710 def latex_matches(self, text: str) -> Tuple[str, Sequence[str]]:
2712 """Match Latex syntax for unicode characters.
2711 """Match Latex syntax for unicode characters.
2713
2712
2714 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
2713 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
2715
2714
2716 .. deprecated:: 8.6
2715 .. deprecated:: 8.6
2717 You can use :meth:`latex_name_matcher` instead.
2716 You can use :meth:`latex_name_matcher` instead.
2718 """
2717 """
2719 slashpos = text.rfind('\\')
2718 slashpos = text.rfind('\\')
2720 if slashpos > -1:
2719 if slashpos > -1:
2721 s = text[slashpos:]
2720 s = text[slashpos:]
2722 if s in latex_symbols:
2721 if s in latex_symbols:
2723 # Try to complete a full latex symbol to unicode
2722 # Try to complete a full latex symbol to unicode
2724 # \\alpha -> Ξ±
2723 # \\alpha -> Ξ±
2725 return s, [latex_symbols[s]]
2724 return s, [latex_symbols[s]]
2726 else:
2725 else:
2727 # If a user has partially typed a latex symbol, give them
2726 # If a user has partially typed a latex symbol, give them
2728 # a full list of options \al -> [\aleph, \alpha]
2727 # a full list of options \al -> [\aleph, \alpha]
2729 matches = [k for k in latex_symbols if k.startswith(s)]
2728 matches = [k for k in latex_symbols if k.startswith(s)]
2730 if matches:
2729 if matches:
2731 return s, matches
2730 return s, matches
2732 return '', ()
2731 return '', ()
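# Illustrative sketch of the same logic on a tiny stand-in table; the real
# `latex_symbols` mapping is imported elsewhere in IPython, so `symbols` and
# `toy_latex_matches` here are made up for this note:
symbols = {"\\alpha": "\N{GREEK SMALL LETTER ALPHA}",
           "\\aleph": "\N{ALEF SYMBOL}"}

def toy_latex_matches(text):
    slashpos = text.rfind("\\")
    if slashpos == -1:
        return "", ()
    s = text[slashpos:]
    if s in symbols:  # a full symbol completes straight to its unicode char
        return s, [symbols[s]]
    matches = [k for k in symbols if k.startswith(s)]
    return (s, matches) if matches else ("", ())

assert toy_latex_matches("x = \\alpha")[1] == ["\N{GREEK SMALL LETTER ALPHA}"]
assert sorted(toy_latex_matches("\\al")[1]) == ["\\aleph", "\\alpha"]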
2733
2732
2734 @context_matcher()
2733 @context_matcher()
2735 def custom_completer_matcher(self, context):
2734 def custom_completer_matcher(self, context):
2736 """Dispatch custom completer.
2735 """Dispatch custom completer.
2737
2736
2738 If a match is found, suppresses all other matchers except for Jedi.
2737 If a match is found, suppresses all other matchers except for Jedi.
2739 """
2738 """
2740 matches = self.dispatch_custom_completer(context.token) or []
2739 matches = self.dispatch_custom_completer(context.token) or []
2741 result = _convert_matcher_v1_result_to_v2(
2740 result = _convert_matcher_v1_result_to_v2(
2742 matches, type=_UNKNOWN_TYPE, suppress_if_matches=True
2741 matches, type=_UNKNOWN_TYPE, suppress_if_matches=True
2743 )
2742 )
2744 result["ordered"] = True
2743 result["ordered"] = True
2745 result["do_not_suppress"] = {_get_matcher_id(self._jedi_matcher)}
2744 result["do_not_suppress"] = {_get_matcher_id(self._jedi_matcher)}
2746 return result
2745 return result
2747
2746
2748 def dispatch_custom_completer(self, text):
2747 def dispatch_custom_completer(self, text):
2749 """
2748 """
2750 .. deprecated:: 8.6
2749 .. deprecated:: 8.6
2751 You can use :meth:`custom_completer_matcher` instead.
2750 You can use :meth:`custom_completer_matcher` instead.
2752 """
2751 """
2753 if not self.custom_completers:
2752 if not self.custom_completers:
2754 return
2753 return
2755
2754
2756 line = self.line_buffer
2755 line = self.line_buffer
2757 if not line.strip():
2756 if not line.strip():
2758 return None
2757 return None
2759
2758
2760 # Create a little structure to pass all the relevant information about
2759 # Create a little structure to pass all the relevant information about
2761 # the current completion to any custom completer.
2760 # the current completion to any custom completer.
2762 event = SimpleNamespace()
2761 event = SimpleNamespace()
2763 event.line = line
2762 event.line = line
2764 event.symbol = text
2763 event.symbol = text
2765 cmd = line.split(None,1)[0]
2764 cmd = line.split(None,1)[0]
2766 event.command = cmd
2765 event.command = cmd
2767 event.text_until_cursor = self.text_until_cursor
2766 event.text_until_cursor = self.text_until_cursor
2768
2767
2769 # for foo etc, try also to find completer for %foo
2768 # for foo etc, try also to find completer for %foo
2770 if not cmd.startswith(self.magic_escape):
2769 if not cmd.startswith(self.magic_escape):
2771 try_magic = self.custom_completers.s_matches(
2770 try_magic = self.custom_completers.s_matches(
2772 self.magic_escape + cmd)
2771 self.magic_escape + cmd)
2773 else:
2772 else:
2774 try_magic = []
2773 try_magic = []
2775
2774
2776 for c in itertools.chain(self.custom_completers.s_matches(cmd),
2775 for c in itertools.chain(self.custom_completers.s_matches(cmd),
2777 try_magic,
2776 try_magic,
2778 self.custom_completers.flat_matches(self.text_until_cursor)):
2777 self.custom_completers.flat_matches(self.text_until_cursor)):
2779 try:
2778 try:
2780 res = c(event)
2779 res = c(event)
2781 if res:
2780 if res:
2782 # first, try case sensitive match
2781 # first, try case sensitive match
2783 withcase = [r for r in res if r.startswith(text)]
2782 withcase = [r for r in res if r.startswith(text)]
2784 if withcase:
2783 if withcase:
2785 return withcase
2784 return withcase
2786 # if none, then case insensitive ones are ok too
2785 # if none, then case insensitive ones are ok too
2787 text_low = text.lower()
2786 text_low = text.lower()
2788 return [r for r in res if r.lower().startswith(text_low)]
2787 return [r for r in res if r.lower().startswith(text_low)]
2789 except TryNext:
2788 except TryNext:
2790 pass
2789 pass
2791 except KeyboardInterrupt:
2790 except KeyboardInterrupt:
2792 """
2791 """
2793 If a custom completer takes too long,
2792 If a custom completer takes too long,
2794 let keyboard interrupt abort and return nothing.
2793 let keyboard interrupt abort and return nothing.
2795 """
2794 """
2796 break
2795 break
2797
2796
2798 return None
2797 return None
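# Illustrative sketch of a completer the dispatcher above could call: it
# receives the `event` namespace built in this method (line, symbol, command,
# text_until_cursor) and returns a list of strings, or raises TryNext to pass.
# The `%apt` magic and `apt_completer` are made up; registration is typically
# done with `ip.set_hook("complete_command", ..., str_key=...)` on a live shell.
def apt_completer(self, event):
    """Complete arguments of a hypothetical ``%apt`` line magic."""
    options = ["install", "info", "remove"]
    return [w for w in options if w.startswith(event.symbol)]

# ip = get_ipython()
# ip.set_hook("complete_command", apt_completer, str_key="%apt")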
2799
2798
2800 def completions(self, text: str, offset: int)->Iterator[Completion]:
2799 def completions(self, text: str, offset: int)->Iterator[Completion]:
2801 """
2800 """
2802 Returns an iterator over the possible completions
2801 Returns an iterator over the possible completions
2803
2802
2804 .. warning::
2803 .. warning::
2805
2804
2806 Unstable
2805 Unstable
2807
2806
2808 This function is unstable, API may change without warning.
2807 This function is unstable, API may change without warning.
2809 It will also raise unless used in the proper context manager.
2808 It will also raise unless used in the proper context manager.
2810
2809
2811 Parameters
2810 Parameters
2812 ----------
2811 ----------
2813 text : str
2812 text : str
2814 Full text of the current input, multi line string.
2813 Full text of the current input, multi line string.
2815 offset : int
2814 offset : int
2816 Integer representing the position of the cursor in ``text``. Offset
2815 Integer representing the position of the cursor in ``text``. Offset
2817 is 0-based indexed.
2816 is 0-based indexed.
2818
2817
2819 Yields
2818 Yields
2820 ------
2819 ------
2821 Completion
2820 Completion
2822
2821
2823 Notes
2822 Notes
2824 -----
2823 -----
2825 The cursor in a text can be seen either as being "in between"
2824 The cursor in a text can be seen either as being "in between"
2826 characters or "on" a character, depending on the interface visible to
2825 characters or "on" a character, depending on the interface visible to
2827 the user. For consistency, the cursor being "in between" characters X
2826 the user. For consistency, the cursor being "in between" characters X
2828 and Y is equivalent to the cursor being "on" character Y; that is to say,
2827 and Y is equivalent to the cursor being "on" character Y; that is to say,
2829 the character the cursor is on is considered as being after the cursor.
2828 the character the cursor is on is considered as being after the cursor.
2830
2829
2831 Combining characters may span more than one position in the
2830 Combining characters may span more than one position in the
2832 text.
2831 text.
2833
2832
2834 .. note::
2833 .. note::
2835
2834
2836 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
2835 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
2837 fake Completion token to distinguish completions returned by Jedi
2836 fake Completion token to distinguish completions returned by Jedi
2838 from the usual IPython completions.
2837 from the usual IPython completions.
2839
2838
2840 .. note::
2839 .. note::
2841
2840
2842 Completions are not completely deduplicated yet. If identical
2841 Completions are not completely deduplicated yet. If identical
2843 completions are coming from different sources this function does not
2842 completions are coming from different sources this function does not
2844 ensure that each completion object will only be present once.
2843 ensure that each completion object will only be present once.
2845 """
2844 """
2846 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
2845 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
2847 "It may change without warnings. "
2846 "It may change without warnings. "
2848 "Use in corresponding context manager.",
2847 "Use in corresponding context manager.",
2849 category=ProvisionalCompleterWarning, stacklevel=2)
2848 category=ProvisionalCompleterWarning, stacklevel=2)
2850
2849
2851 seen = set()
2850 seen = set()
2852 profiler:Optional[cProfile.Profile]
2851 profiler:Optional[cProfile.Profile]
2853 try:
2852 try:
2854 if self.profile_completions:
2853 if self.profile_completions:
2855 import cProfile
2854 import cProfile
2856 profiler = cProfile.Profile()
2855 profiler = cProfile.Profile()
2857 profiler.enable()
2856 profiler.enable()
2858 else:
2857 else:
2859 profiler = None
2858 profiler = None
2860
2859
2861 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
2860 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
2862 if c and (c in seen):
2861 if c and (c in seen):
2863 continue
2862 continue
2864 yield c
2863 yield c
2865 seen.add(c)
2864 seen.add(c)
2866 except KeyboardInterrupt:
2865 except KeyboardInterrupt:
2867 """if completions take too long and users send keyboard interrupt,
2866 """if completions take too long and users send keyboard interrupt,
2868 do not crash and return ASAP. """
2867 do not crash and return ASAP. """
2869 pass
2868 pass
2870 finally:
2869 finally:
2871 if profiler is not None:
2870 if profiler is not None:
2872 profiler.disable()
2871 profiler.disable()
2873 ensure_dir_exists(self.profiler_output_dir)
2872 ensure_dir_exists(self.profiler_output_dir)
2874 output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
2873 output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
2875 print("Writing profiler output to", output_path)
2874 print("Writing profiler output to", output_path)
2876 profiler.dump_stats(output_path)
2875 profiler.dump_stats(output_path)
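# Illustrative sketch of typical use of the provisional API above, assuming an
# interactive shell is available (`get_ipython()` returns None otherwise) and
# the `provisionalcompleter` context manager exported by this module:
from IPython.core.getipython import get_ipython
from IPython.core.completer import provisionalcompleter

ip = get_ipython()
if ip is not None:
    code = "import collections; collections.named"
    with provisionalcompleter():  # opt in to the provisional completions API
        comps = list(ip.Completer.completions(code, len(code)))
    for c in comps:
        print(c.text, c.type, c.start, c.end)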
2877
2876
2878 def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
2877 def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
2879 """
2878 """
2880 Core completion method. Same signature as :any:`completions`, with the
2879 Core completion method. Same signature as :any:`completions`, with the
2881 extra ``_timeout`` parameter (in seconds).
2880 extra ``_timeout`` parameter (in seconds).
2882
2881
2883 Computing jedi's completion ``.type`` can be quite expensive (it is a
2882 Computing jedi's completion ``.type`` can be quite expensive (it is a
2884 lazy property) and can require some warm-up, more warm up than just
2883 lazy property) and can require some warm-up, more warm up than just
2885 computing the ``name`` of a completion. The warm-up can be:
2884 computing the ``name`` of a completion. The warm-up can be:
2886
2885
2887 - Long warm-up the first time a module is encountered after
2886 - Long warm-up the first time a module is encountered after
2888 install/update: actually build parse/inference tree.
2887 install/update: actually build parse/inference tree.
2889
2888
2890 - first time the module is encountered in a session: load tree from
2889 - first time the module is encountered in a session: load tree from
2891 disk.
2890 disk.
2892
2891
2893 We don't want to block completions for tens of seconds so we give the
2892 We don't want to block completions for tens of seconds so we give the
2894 completer a "budget" of ``_timeout`` seconds per invocation to compute
2893 completer a "budget" of ``_timeout`` seconds per invocation to compute
2895 completion types; the completions that have not yet been computed will
2894 completion types; the completions that have not yet been computed will
2896 be marked as "unknown" and will have a chance to be computed next round
2895 be marked as "unknown" and will have a chance to be computed next round
2897 as things get cached.
2896 as things get cached.
2898
2897
2899 Keep in mind that Jedi is not the only thing processing the completion, so
2898 Keep in mind that Jedi is not the only thing processing the completion, so
2900 keep the timeout short-ish: if we take more than 0.3 seconds we still
2899 keep the timeout short-ish: if we take more than 0.3 seconds we still
2901 have lots of processing to do.
2900 have lots of processing to do.
2902
2901
2903 """
2902 """
2904 deadline = time.monotonic() + _timeout
2903 deadline = time.monotonic() + _timeout
2905
2904
2906 before = full_text[:offset]
2905 before = full_text[:offset]
2907 cursor_line, cursor_column = position_to_cursor(full_text, offset)
2906 cursor_line, cursor_column = position_to_cursor(full_text, offset)
2908
2907
2909 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2908 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2910
2909
2911 def is_non_jedi_result(
2910 def is_non_jedi_result(
2912 result: MatcherResult, identifier: str
2911 result: MatcherResult, identifier: str
2913 ) -> TypeGuard[SimpleMatcherResult]:
2912 ) -> TypeGuard[SimpleMatcherResult]:
2914 return identifier != jedi_matcher_id
2913 return identifier != jedi_matcher_id
2915
2914
2916 results = self._complete(
2915 results = self._complete(
2917 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column
2916 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column
2918 )
2917 )
2919
2918
2920 non_jedi_results: Dict[str, SimpleMatcherResult] = {
2919 non_jedi_results: Dict[str, SimpleMatcherResult] = {
2921 identifier: result
2920 identifier: result
2922 for identifier, result in results.items()
2921 for identifier, result in results.items()
2923 if is_non_jedi_result(result, identifier)
2922 if is_non_jedi_result(result, identifier)
2924 }
2923 }
2925
2924
2926 jedi_matches = (
2925 jedi_matches = (
2927 cast(_JediMatcherResult, results[jedi_matcher_id])["completions"]
2926 cast(_JediMatcherResult, results[jedi_matcher_id])["completions"]
2928 if jedi_matcher_id in results
2927 if jedi_matcher_id in results
2929 else ()
2928 else ()
2930 )
2929 )
2931
2930
2932 iter_jm = iter(jedi_matches)
2931 iter_jm = iter(jedi_matches)
2933 if _timeout:
2932 if _timeout:
2934 for jm in iter_jm:
2933 for jm in iter_jm:
2935 try:
2934 try:
2936 type_ = jm.type
2935 type_ = jm.type
2937 except Exception:
2936 except Exception:
2938 if self.debug:
2937 if self.debug:
2939 print("Error in Jedi getting type of ", jm)
2938 print("Error in Jedi getting type of ", jm)
2940 type_ = None
2939 type_ = None
2941 delta = len(jm.name_with_symbols) - len(jm.complete)
2940 delta = len(jm.name_with_symbols) - len(jm.complete)
2942 if type_ == 'function':
2941 if type_ == 'function':
2943 signature = _make_signature(jm)
2942 signature = _make_signature(jm)
2944 else:
2943 else:
2945 signature = ''
2944 signature = ''
2946 yield Completion(start=offset - delta,
2945 yield Completion(start=offset - delta,
2947 end=offset,
2946 end=offset,
2948 text=jm.name_with_symbols,
2947 text=jm.name_with_symbols,
2949 type=type_,
2948 type=type_,
2950 signature=signature,
2949 signature=signature,
2951 _origin='jedi')
2950 _origin='jedi')
2952
2951
2953 if time.monotonic() > deadline:
2952 if time.monotonic() > deadline:
2954 break
2953 break
2955
2954
2956 for jm in iter_jm:
2955 for jm in iter_jm:
2957 delta = len(jm.name_with_symbols) - len(jm.complete)
2956 delta = len(jm.name_with_symbols) - len(jm.complete)
2958 yield Completion(
2957 yield Completion(
2959 start=offset - delta,
2958 start=offset - delta,
2960 end=offset,
2959 end=offset,
2961 text=jm.name_with_symbols,
2960 text=jm.name_with_symbols,
2962 type=_UNKNOWN_TYPE, # don't compute type for speed
2961 type=_UNKNOWN_TYPE, # don't compute type for speed
2963 _origin="jedi",
2962 _origin="jedi",
2964 signature="",
2963 signature="",
2965 )
2964 )
2966
2965
2967 # TODO:
2966 # TODO:
2968 # Suppress this, right now just for debug.
2967 # Suppress this, right now just for debug.
2969 if jedi_matches and non_jedi_results and self.debug:
2968 if jedi_matches and non_jedi_results and self.debug:
2970 some_start_offset = before.rfind(
2969 some_start_offset = before.rfind(
2971 next(iter(non_jedi_results.values()))["matched_fragment"]
2970 next(iter(non_jedi_results.values()))["matched_fragment"]
2972 )
2971 )
2973 yield Completion(
2972 yield Completion(
2974 start=some_start_offset,
2973 start=some_start_offset,
2975 end=offset,
2974 end=offset,
2976 text="--jedi/ipython--",
2975 text="--jedi/ipython--",
2977 _origin="debug",
2976 _origin="debug",
2978 type="none",
2977 type="none",
2979 signature="",
2978 signature="",
2980 )
2979 )
2981
2980
2982 ordered: List[Completion] = []
2981 ordered: List[Completion] = []
2983 sortable: List[Completion] = []
2982 sortable: List[Completion] = []
2984
2983
2985 for origin, result in non_jedi_results.items():
2984 for origin, result in non_jedi_results.items():
2986 matched_text = result["matched_fragment"]
2985 matched_text = result["matched_fragment"]
2987 start_offset = before.rfind(matched_text)
2986 start_offset = before.rfind(matched_text)
2988 is_ordered = result.get("ordered", False)
2987 is_ordered = result.get("ordered", False)
2989 container = ordered if is_ordered else sortable
2988 container = ordered if is_ordered else sortable
2990
2989
2991 # I'm unsure if this is always true, so let's assert and see if it
2990 # I'm unsure if this is always true, so let's assert and see if it
2992 # crashes
2991 # crashes
2993 assert before.endswith(matched_text)
2992 assert before.endswith(matched_text)
2994
2993
2995 for simple_completion in result["completions"]:
2994 for simple_completion in result["completions"]:
2996 completion = Completion(
2995 completion = Completion(
2997 start=start_offset,
2996 start=start_offset,
2998 end=offset,
2997 end=offset,
2999 text=simple_completion.text,
2998 text=simple_completion.text,
3000 _origin=origin,
2999 _origin=origin,
3001 signature="",
3000 signature="",
3002 type=simple_completion.type or _UNKNOWN_TYPE,
3001 type=simple_completion.type or _UNKNOWN_TYPE,
3003 )
3002 )
3004 container.append(completion)
3003 container.append(completion)
3005
3004
3006 yield from list(self._deduplicate(ordered + self._sort(sortable)))[
3005 yield from list(self._deduplicate(ordered + self._sort(sortable)))[
3007 :MATCHES_LIMIT
3006 :MATCHES_LIMIT
3008 ]
3007 ]
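# Illustrative sketch of the time-budget pattern used above: expensive per-item
# work (here, Jedi's lazy ``.type``) runs only until the deadline passes, and
# whatever is left gets a cheap placeholder. `with_budget` is a made-up helper:
import time

def with_budget(items, expensive, cheap, budget_s):
    deadline = time.monotonic() + budget_s
    it = iter(items)
    out = []
    for x in it:
        out.append(expensive(x))
        if time.monotonic() > deadline:
            break
    out.extend(cheap(x) for x in it)  # remaining items take the fast path
    return out

assert with_budget([1, 2, 3],
                   lambda x: ("rich", x),
                   lambda x: ("unknown", x),
                   budget_s=10.0) == [("rich", 1), ("rich", 2), ("rich", 3)]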
3009
3008
3010 def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
3009 def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
3011 """Find completions for the given text and line context.
3010 """Find completions for the given text and line context.
3012
3011
3013 Note that both the text and the line_buffer are optional, but at least
3012 Note that both the text and the line_buffer are optional, but at least
3014 one of them must be given.
3013 one of them must be given.
3015
3014
3016 Parameters
3015 Parameters
3017 ----------
3016 ----------
3018 text : string, optional
3017 text : string, optional
3019 Text to perform the completion on. If not given, the line buffer
3018 Text to perform the completion on. If not given, the line buffer
3020 is split using the instance's CompletionSplitter object.
3019 is split using the instance's CompletionSplitter object.
3021 line_buffer : string, optional
3020 line_buffer : string, optional
3022 If not given, the completer attempts to obtain the current line
3021 If not given, the completer attempts to obtain the current line
3023 buffer via readline. This keyword allows clients which are
3022 buffer via readline. This keyword allows clients which are
3024 requesting text completions in non-readline contexts to inform
3023 requesting text completions in non-readline contexts to inform
3025 the completer of the entire text.
3024 the completer of the entire text.
3026 cursor_pos : int, optional
3025 cursor_pos : int, optional
3027 Index of the cursor in the full line buffer. Should be provided by
3026 Index of the cursor in the full line buffer. Should be provided by
3028 remote frontends where the kernel has no access to frontend state.
3027 remote frontends where the kernel has no access to frontend state.
3029
3028
3030 Returns
3029 Returns
3031 -------
3030 -------
3032 Tuple of two items:
3031 Tuple of two items:
3033 text : str
3032 text : str
3034 Text that was actually used in the completion.
3033 Text that was actually used in the completion.
3035 matches : list
3034 matches : list
3036 A list of completion matches.
3035 A list of completion matches.
3037
3036
3038 Notes
3037 Notes
3039 -----
3038 -----
3040 This API is likely to be deprecated and replaced by
3039 This API is likely to be deprecated and replaced by
3041 :any:`IPCompleter.completions` in the future.
3040 :any:`IPCompleter.completions` in the future.
3042
3041
3043 """
3042 """
3044 warnings.warn('`Completer.complete` is pending deprecation since '
3043 warnings.warn('`Completer.complete` is pending deprecation since '
3045 'IPython 6.0 and will be replaced by `Completer.completions`.',
3044 'IPython 6.0 and will be replaced by `Completer.completions`.',
3046 PendingDeprecationWarning)
3045 PendingDeprecationWarning)
3047 # potential todo: fold the 3rd throwaway argument of _complete
3046 # potential todo: fold the 3rd throwaway argument of _complete
3048 # into the first two.
3047 # into the first two.
3049 # TODO: Q: does the above refer to jedi completions (i.e. 0-indexed?)
3048 # TODO: Q: does the above refer to jedi completions (i.e. 0-indexed?)
3050 # TODO: should we deprecate now, or does it stay?
3049 # TODO: should we deprecate now, or does it stay?
3051
3050
3052 results = self._complete(
3051 results = self._complete(
3053 line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0
3052 line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0
3054 )
3053 )
3055
3054
3056 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
3055 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
3057
3056
3058 return self._arrange_and_extract(
3057 return self._arrange_and_extract(
3059 results,
3058 results,
3060 # TODO: can we confirm that excluding Jedi here was a deliberate choice in previous version?
3059 # TODO: can we confirm that excluding Jedi here was a deliberate choice in previous version?
3061 skip_matchers={jedi_matcher_id},
3060 skip_matchers={jedi_matcher_id},
3062 # this API does not support different start/end positions (fragments of token).
3061 # this API does not support different start/end positions (fragments of token).
3063 abort_if_offset_changes=True,
3062 abort_if_offset_changes=True,
3064 )
3063 )
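# Illustrative sketch of the legacy stateful API documented above, assuming an
# interactive shell (`get_ipython()` returns None otherwise); unlike
# `completions`, it returns the completed token plus a flat list of strings:
import warnings
from IPython.core.getipython import get_ipython

ip = get_ipython()
if ip is not None:
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", PendingDeprecationWarning)
        token, matches = ip.Completer.complete(line_buffer="import col",
                                               cursor_pos=10)
    print(token, matches[:5])  # e.g. "col" and module names starting with it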
3065
3064
3066 def _arrange_and_extract(
3065 def _arrange_and_extract(
3067 self,
3066 self,
3068 results: Dict[str, MatcherResult],
3067 results: Dict[str, MatcherResult],
3069 skip_matchers: Set[str],
3068 skip_matchers: Set[str],
3070 abort_if_offset_changes: bool,
3069 abort_if_offset_changes: bool,
3071 ):
3070 ):
3072 sortable: List[AnyMatcherCompletion] = []
3071 sortable: List[AnyMatcherCompletion] = []
3073 ordered: List[AnyMatcherCompletion] = []
3072 ordered: List[AnyMatcherCompletion] = []
3074 most_recent_fragment = None
3073 most_recent_fragment = None
3075 for identifier, result in results.items():
3074 for identifier, result in results.items():
3076 if identifier in skip_matchers:
3075 if identifier in skip_matchers:
3077 continue
3076 continue
3078 if not result["completions"]:
3077 if not result["completions"]:
3079 continue
3078 continue
3080 if not most_recent_fragment:
3079 if not most_recent_fragment:
3081 most_recent_fragment = result["matched_fragment"]
3080 most_recent_fragment = result["matched_fragment"]
3082 if (
3081 if (
3083 abort_if_offset_changes
3082 abort_if_offset_changes
3084 and result["matched_fragment"] != most_recent_fragment
3083 and result["matched_fragment"] != most_recent_fragment
3085 ):
3084 ):
3086 break
3085 break
3087 if result.get("ordered", False):
3086 if result.get("ordered", False):
3088 ordered.extend(result["completions"])
3087 ordered.extend(result["completions"])
3089 else:
3088 else:
3090 sortable.extend(result["completions"])
3089 sortable.extend(result["completions"])
3091
3090
3092 if not most_recent_fragment:
3091 if not most_recent_fragment:
3093 most_recent_fragment = "" # to satisfy typechecker (and just in case)
3092 most_recent_fragment = "" # to satisfy typechecker (and just in case)
3094
3093
3095 return most_recent_fragment, [
3094 return most_recent_fragment, [
3096 m.text for m in self._deduplicate(ordered + self._sort(sortable))
3095 m.text for m in self._deduplicate(ordered + self._sort(sortable))
3097 ]
3096 ]
3098
3097
3099 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
3098 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
3100 full_text=None) -> _CompleteResult:
3099 full_text=None) -> _CompleteResult:
3101 """
3100 """
3102 Like complete but can also return raw jedi completions as well as the
3101 Like complete but can also return raw jedi completions as well as the
3103 origin of the completion text. This could (and should) be made much
3102 origin of the completion text. This could (and should) be made much
3104 cleaner but that will be simpler once we drop the old (and stateful)
3103 cleaner but that will be simpler once we drop the old (and stateful)
3105 :any:`complete` API.
3104 :any:`complete` API.
3106
3105
3107 With the current provisional API, cursor_pos acts (depending on the
3106 With the current provisional API, cursor_pos acts (depending on the
3108 caller) either as the offset in the ``text`` or ``line_buffer``, or as the
3107 caller) either as the offset in the ``text`` or ``line_buffer``, or as the
3109 ``column`` when passing multiline strings; this could/should be renamed
3108 ``column`` when passing multiline strings; this could/should be renamed
3110 but would add extra noise.
3109 but would add extra noise.
3111
3110
3112 Parameters
3111 Parameters
3113 ----------
3112 ----------
3114 cursor_line
3113 cursor_line
3115 Index of the line the cursor is on. 0 indexed.
3114 Index of the line the cursor is on. 0 indexed.
3116 cursor_pos
3115 cursor_pos
3117 Position of the cursor in the current line/line_buffer/text. 0
3116 Position of the cursor in the current line/line_buffer/text. 0
3118 indexed.
3117 indexed.
3119 line_buffer : optional, str
3118 line_buffer : optional, str
3120 The current line the cursor is in; this is mostly due to legacy
3119 The current line the cursor is in; this is mostly due to legacy
3121 reasons, as readline could only give us the single current line.
3120 reasons, as readline could only give us the single current line.
3122 Prefer `full_text`.
3121 Prefer `full_text`.
3123 text : str
3122 text : str
3124 The current "token" the cursor is in, mostly also for historical
3123 The current "token" the cursor is in, mostly also for historical
3125 reasons, as the completer would trigger only after the current line
3124 reasons, as the completer would trigger only after the current line
3126 was parsed.
3125 was parsed.
3127 full_text : str
3126 full_text : str
3128 Full text of the current cell.
3127 Full text of the current cell.
3129
3128
3130 Returns
3129 Returns
3131 -------
3130 -------
3132 An ordered dictionary where keys are identifiers of completion
3131 An ordered dictionary where keys are identifiers of completion
3133 matchers and values are ``MatcherResult``s.
3132 matchers and values are ``MatcherResult``s.
3134 """
3133 """
3135
3134
3136 # if the cursor position isn't given, the only sane assumption we can
3135 # if the cursor position isn't given, the only sane assumption we can
3137 # make is that it's at the end of the line (the common case)
3136 # make is that it's at the end of the line (the common case)
3138 if cursor_pos is None:
3137 if cursor_pos is None:
3139 cursor_pos = len(line_buffer) if text is None else len(text)
3138 cursor_pos = len(line_buffer) if text is None else len(text)
3140
3139
3141 if self.use_main_ns:
3140 if self.use_main_ns:
3142 self.namespace = __main__.__dict__
3141 self.namespace = __main__.__dict__
3143
3142
3144 # if text is either None or an empty string, rely on the line buffer
3143 # if text is either None or an empty string, rely on the line buffer
3145 if (not line_buffer) and full_text:
3144 if (not line_buffer) and full_text:
3146 line_buffer = full_text.split('\n')[cursor_line]
3145 line_buffer = full_text.split('\n')[cursor_line]
3147 if not text: # issue #11508: check line_buffer before calling split_line
3146 if not text: # issue #11508: check line_buffer before calling split_line
3148 text = (
3147 text = (
3149 self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ""
3148 self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ""
3150 )
3149 )
3151
3150
3152 # If no line buffer is given, assume the input text is all there was
3151 # If no line buffer is given, assume the input text is all there was
3153 if line_buffer is None:
3152 if line_buffer is None:
3154 line_buffer = text
3153 line_buffer = text
3155
3154
3156 # deprecated - do not use `line_buffer` in new code.
3155 # deprecated - do not use `line_buffer` in new code.
3157 self.line_buffer = line_buffer
3156 self.line_buffer = line_buffer
3158 self.text_until_cursor = self.line_buffer[:cursor_pos]
3157 self.text_until_cursor = self.line_buffer[:cursor_pos]
3159
3158
3160 if not full_text:
3159 if not full_text:
3161 full_text = line_buffer
3160 full_text = line_buffer
3162
3161
3163 context = CompletionContext(
3162 context = CompletionContext(
3164 full_text=full_text,
3163 full_text=full_text,
3165 cursor_position=cursor_pos,
3164 cursor_position=cursor_pos,
3166 cursor_line=cursor_line,
3165 cursor_line=cursor_line,
3167 token=text,
3166 token=text,
3168 limit=MATCHES_LIMIT,
3167 limit=MATCHES_LIMIT,
3169 )
3168 )
3170
3169
3171 # Start with a clean slate of completions
3170 # Start with a clean slate of completions
3172 results: Dict[str, MatcherResult] = {}
3171 results: Dict[str, MatcherResult] = {}
3173
3172
3174 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
3173 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
3175
3174
3176 suppressed_matchers: Set[str] = set()
3175 suppressed_matchers: Set[str] = set()
3177
3176
3178 matchers = {
3177 matchers = {
3179 _get_matcher_id(matcher): matcher
3178 _get_matcher_id(matcher): matcher
3180 for matcher in sorted(
3179 for matcher in sorted(
3181 self.matchers, key=_get_matcher_priority, reverse=True
3180 self.matchers, key=_get_matcher_priority, reverse=True
3182 )
3181 )
3183 }
3182 }
3184
3183
3185 for matcher_id, matcher in matchers.items():
3184 for matcher_id, matcher in matchers.items():
3186 matcher_id = _get_matcher_id(matcher)
3185 matcher_id = _get_matcher_id(matcher)
3187
3186
3188 if matcher_id in self.disable_matchers:
3187 if matcher_id in self.disable_matchers:
3189 continue
3188 continue
3190
3189
3191 if matcher_id in results:
3190 if matcher_id in results:
3192 warnings.warn(f"Duplicate matcher ID: {matcher_id}.")
3191 warnings.warn(f"Duplicate matcher ID: {matcher_id}.")
3193
3192
3194 if matcher_id in suppressed_matchers:
3193 if matcher_id in suppressed_matchers:
3195 continue
3194 continue
3196
3195
3197 result: MatcherResult
3196 result: MatcherResult
3198 try:
3197 try:
3199 if _is_matcher_v1(matcher):
3198 if _is_matcher_v1(matcher):
3200 result = _convert_matcher_v1_result_to_v2(
3199 result = _convert_matcher_v1_result_to_v2(
3201 matcher(text), type=_UNKNOWN_TYPE
3200 matcher(text), type=_UNKNOWN_TYPE
3202 )
3201 )
3203 elif _is_matcher_v2(matcher):
3202 elif _is_matcher_v2(matcher):
3204 result = matcher(context)
3203 result = matcher(context)
3205 else:
3204 else:
3206 api_version = _get_matcher_api_version(matcher)
3205 api_version = _get_matcher_api_version(matcher)
3207 raise ValueError(f"Unsupported API version {api_version}")
3206 raise ValueError(f"Unsupported API version {api_version}")
3208 except:
3207 except:
3209 # Show the ugly traceback if the matcher causes an
3208 # Show the ugly traceback if the matcher causes an
3210 # exception, but do NOT crash the kernel!
3209 # exception, but do NOT crash the kernel!
3211 sys.excepthook(*sys.exc_info())
3210 sys.excepthook(*sys.exc_info())
3212 continue
3211 continue
3213
3212
3214 # set default value for matched fragment if the matcher did not set one.
3213 # set default value for matched fragment if the matcher did not set one.
3215 result["matched_fragment"] = result.get("matched_fragment", context.token)
3214 result["matched_fragment"] = result.get("matched_fragment", context.token)
3216
3215
3217 if not suppressed_matchers:
3216 if not suppressed_matchers:
3218 suppression_recommended: Union[bool, Set[str]] = result.get(
3217 suppression_recommended: Union[bool, Set[str]] = result.get(
3219 "suppress", False
3218 "suppress", False
3220 )
3219 )
3221
3220
3222 suppression_config = (
3221 suppression_config = (
3223 self.suppress_competing_matchers.get(matcher_id, None)
3222 self.suppress_competing_matchers.get(matcher_id, None)
3224 if isinstance(self.suppress_competing_matchers, dict)
3223 if isinstance(self.suppress_competing_matchers, dict)
3225 else self.suppress_competing_matchers
3224 else self.suppress_competing_matchers
3226 )
3225 )
3227 should_suppress = (
3226 should_suppress = (
3228 (suppression_config is True)
3227 (suppression_config is True)
3229 or (suppression_recommended and (suppression_config is not False))
3228 or (suppression_recommended and (suppression_config is not False))
3230 ) and has_any_completions(result)
3229 ) and has_any_completions(result)
3231
3230
3232 if should_suppress:
3231 if should_suppress:
3233 suppression_exceptions: Set[str] = result.get(
3232 suppression_exceptions: Set[str] = result.get(
3234 "do_not_suppress", set()
3233 "do_not_suppress", set()
3235 )
3234 )
3236 if isinstance(suppression_recommended, Iterable):
3235 if isinstance(suppression_recommended, Iterable):
3237 to_suppress = set(suppression_recommended)
3236 to_suppress = set(suppression_recommended)
3238 else:
3237 else:
3239 to_suppress = set(matchers)
3238 to_suppress = set(matchers)
3240 suppressed_matchers = to_suppress - suppression_exceptions
3239 suppressed_matchers = to_suppress - suppression_exceptions
3241
3240
3242 new_results = {}
3241 new_results = {}
3243 for previous_matcher_id, previous_result in results.items():
3242 for previous_matcher_id, previous_result in results.items():
3244 if previous_matcher_id not in suppressed_matchers:
3243 if previous_matcher_id not in suppressed_matchers:
3245 new_results[previous_matcher_id] = previous_result
3244 new_results[previous_matcher_id] = previous_result
3246 results = new_results
3245 results = new_results
3247
3246
3248 results[matcher_id] = result
3247 results[matcher_id] = result
3249
3248
3250 _, matches = self._arrange_and_extract(
3249 _, matches = self._arrange_and_extract(
3251 results,
3250 results,
3252 # TODO Jedi completions not included in legacy stateful API; was this deliberate or an omission?
3251 # TODO Jedi completions not included in legacy stateful API; was this deliberate or an omission?
3253 # if it was omission, we can remove the filtering step, otherwise remove this comment.
3252 # if it was omission, we can remove the filtering step, otherwise remove this comment.
3254 skip_matchers={jedi_matcher_id},
3253 skip_matchers={jedi_matcher_id},
3255 abort_if_offset_changes=False,
3254 abort_if_offset_changes=False,
3256 )
3255 )
3257
3256
3258 # populate legacy stateful API
3257 # populate legacy stateful API
3259 self.matches = matches
3258 self.matches = matches
3260
3259
3261 return results
3260 return results
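# Illustrative sketch of the suppression resolution above, reduced to a pure
# function; the argument names are local to this sketch. A matcher may
# recommend suppressing competitors (bool or a set of matcher ids), the user
# config can force or forbid it, and `do_not_suppress` carves out exceptions:
def resolve_suppressed(matcher_ids, recommended, config, do_not_suppress, has_matches):
    should = ((config is True)
              or (recommended and config is not False)) and has_matches
    if not should:
        return set()
    to_suppress = (set(recommended)
                   if isinstance(recommended, (set, frozenset, list, tuple))
                   else set(matcher_ids))
    return to_suppress - set(do_not_suppress)

all_ids = {"custom", "jedi", "dict_key"}
assert resolve_suppressed(all_ids, True, None, {"jedi"}, True) == {"custom", "dict_key"}
assert resolve_suppressed(all_ids, False, None, set(), True) == set()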
3262
3261
3263 @staticmethod
3262 @staticmethod
3264 def _deduplicate(
3263 def _deduplicate(
3265 matches: Sequence[AnyCompletion],
3264 matches: Sequence[AnyCompletion],
3266 ) -> Iterable[AnyCompletion]:
3265 ) -> Iterable[AnyCompletion]:
3267 filtered_matches: Dict[str, AnyCompletion] = {}
3266 filtered_matches: Dict[str, AnyCompletion] = {}
3268 for match in matches:
3267 for match in matches:
3269 text = match.text
3268 text = match.text
3270 if (
3269 if (
3271 text not in filtered_matches
3270 text not in filtered_matches
3272 or filtered_matches[text].type == _UNKNOWN_TYPE
3271 or filtered_matches[text].type == _UNKNOWN_TYPE
3273 ):
3272 ):
3274 filtered_matches[text] = match
3273 filtered_matches[text] = match
3275
3274
3276 return filtered_matches.values()
3275 return filtered_matches.values()
3277
3276
3278 @staticmethod
3277 @staticmethod
3279 def _sort(matches: Sequence[AnyCompletion]):
3278 def _sort(matches: Sequence[AnyCompletion]):
3280 return sorted(matches, key=lambda x: completions_sorting_key(x.text))
3279 return sorted(matches, key=lambda x: completions_sorting_key(x.text))
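# Illustrative sketch of the deduplication rule above on plain (text, type)
# tuples: keep one entry per completion text, but let a later entry with a
# real type replace an earlier unknown-typed one. `UNKNOWN` and `dedup` are
# made-up names for this note:
UNKNOWN = "<unknown>"

def dedup(pairs):
    kept = {}
    for text, typ in pairs:
        if text not in kept or kept[text] == UNKNOWN:
            kept[text] = typ
    return list(kept.items())

assert dedup([("open", UNKNOWN), ("open", "function"), ("os", "module")]) == \
       [("open", "function"), ("os", "module")]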
3281
3280
3282 @context_matcher()
3281 @context_matcher()
3283 def fwd_unicode_matcher(self, context: CompletionContext):
3282 def fwd_unicode_matcher(self, context: CompletionContext):
3284 """Same as :any:`fwd_unicode_match`, but adopted to new Matcher API."""
3283 """Same as :any:`fwd_unicode_match`, but adopted to new Matcher API."""
3285 # TODO: use `context.limit` to terminate early once we matched the maximum
3284 # TODO: use `context.limit` to terminate early once we matched the maximum
3286 # number that will be used downstream; can be added as an optional to
3285 # number that will be used downstream; can be added as an optional to
3287 # `fwd_unicode_match(text: str, limit: int = None)` or we could re-implement here.
3286 # `fwd_unicode_match(text: str, limit: int = None)` or we could re-implement here.
3288 fragment, matches = self.fwd_unicode_match(context.text_until_cursor)
3287 fragment, matches = self.fwd_unicode_match(context.text_until_cursor)
3289 return _convert_matcher_v1_result_to_v2(
3288 return _convert_matcher_v1_result_to_v2(
3290 matches, type="unicode", fragment=fragment, suppress_if_matches=True
3289 matches, type="unicode", fragment=fragment, suppress_if_matches=True
3291 )
3290 )
3292
3291
3293 def fwd_unicode_match(self, text: str) -> Tuple[str, Sequence[str]]:
3292 def fwd_unicode_match(self, text: str) -> Tuple[str, Sequence[str]]:
3294 """
3293 """
3295 Forward match a string starting with a backslash with a list of
3294 Forward match a string starting with a backslash with a list of
3296 potential Unicode completions.
3295 potential Unicode completions.
3297
3296
3298 Will compute list of Unicode character names on first call and cache it.
3297 Will compute list of Unicode character names on first call and cache it.
3299
3298
3300 .. deprecated:: 8.6
3299 .. deprecated:: 8.6
3301 You can use :meth:`fwd_unicode_matcher` instead.
3300 You can use :meth:`fwd_unicode_matcher` instead.
3302
3301
3303 Returns
3302 Returns
3304 -------
3303 -------
3305 A tuple with:
3304 A tuple with:
3306 - matched text (empty if no matches)
3305 - matched text (empty if no matches)
3307 - list of potential completions (empty tuple otherwise)
3306 - list of potential completions (empty tuple otherwise)
3308 """
3307 """
3309 # TODO: self.unicode_names is a list of ~100k elements that we traverse on each call.
3308 # TODO: self.unicode_names is a list of ~100k elements that we traverse on each call.
3310 # We could do a faster match using a Trie.
3309 # We could do a faster match using a Trie.
3311
3310
3312 # Using pygtrie, the following seems to work:
3311 # Using pygtrie, the following seems to work:
3313
3312
3314 # s = PrefixSet()
3313 # s = PrefixSet()
3315
3314
3316 # for c in range(0,0x10FFFF + 1):
3315 # for c in range(0,0x10FFFF + 1):
3317 # try:
3316 # try:
3318 # s.add(unicodedata.name(chr(c)))
3317 # s.add(unicodedata.name(chr(c)))
3319 # except ValueError:
3318 # except ValueError:
3320 # pass
3319 # pass
3321 # [''.join(k) for k in s.iter(prefix)]
3320 # [''.join(k) for k in s.iter(prefix)]
3322
3321
3323 # But it needs to be timed and adds an extra dependency.
3322 # But it needs to be timed and adds an extra dependency.
3324
3323
3325 slashpos = text.rfind('\\')
3324 slashpos = text.rfind('\\')
3326 # if text starts with slash
3325 # if text starts with slash
3327 if slashpos > -1:
3326 if slashpos > -1:
3328 # PERF: It's important that we don't access self._unicode_names
3327 # PERF: It's important that we don't access self._unicode_names
3329 # until we're inside this if-block. _unicode_names is lazily
3328 # until we're inside this if-block. _unicode_names is lazily
3330 # initialized, and it takes a user-noticeable amount of time to
3329 # initialized, and it takes a user-noticeable amount of time to
3331 # initialize it, so we don't want to initialize it unless we're
3330 # initialize it, so we don't want to initialize it unless we're
3332 # actually going to use it.
3331 # actually going to use it.
3333 s = text[slashpos + 1 :]
3332 s = text[slashpos + 1 :]
3334 sup = s.upper()
3333 sup = s.upper()
3335 candidates = [x for x in self.unicode_names if x.startswith(sup)]
3334 candidates = [x for x in self.unicode_names if x.startswith(sup)]
3336 if candidates:
3335 if candidates:
3337 return s, candidates
3336 return s, candidates
3338 candidates = [x for x in self.unicode_names if sup in x]
3337 candidates = [x for x in self.unicode_names if sup in x]
3339 if candidates:
3338 if candidates:
3340 return s, candidates
3339 return s, candidates
3341 splitsup = sup.split(" ")
3340 splitsup = sup.split(" ")
3342 candidates = [
3341 candidates = [
3343 x for x in self.unicode_names if all(u in x for u in splitsup)
3342 x for x in self.unicode_names if all(u in x for u in splitsup)
3344 ]
3343 ]
3345 if candidates:
3344 if candidates:
3346 return s, candidates
3345 return s, candidates
3347
3346
3348 return "", ()
3347 return "", ()
3349
3348
3350 # if text does not start with slash
3349 # if text does not start with slash
3351 else:
3350 else:
3352 return '', ()
3351 return '', ()
3353
3352
3354 @property
3353 @property
3355 def unicode_names(self) -> List[str]:
3354 def unicode_names(self) -> List[str]:
3356 """List of names of unicode code points that can be completed.
3355 """List of names of unicode code points that can be completed.
3357
3356
3358 The list is lazily initialized on first access.
3357 The list is lazily initialized on first access.
3359 """
3358 """
3360 if self._unicode_names is None:
3359 if self._unicode_names is None:
3361 # Computation is delegated to _unicode_name_compute over _UNICODE_RANGES;
3360 # Computation is delegated to _unicode_name_compute over _UNICODE_RANGES;
3362 # it collects unicodedata.name for every named code point in those ranges.
3361 # it collects unicodedata.name for every named code point in those ranges.
3367 self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
3366 self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
3368
3367
3369 return self._unicode_names
3368 return self._unicode_names
3370
3369
3371 def _unicode_name_compute(ranges:List[Tuple[int,int]]) -> List[str]:
3370 def _unicode_name_compute(ranges:List[Tuple[int,int]]) -> List[str]:
3372 names = []
3371 names = []
3373 for start, stop in ranges:
3372 for start, stop in ranges:
3374 for c in range(start, stop):
3373 for c in range(start, stop):
3375 try:
3374 try:
3376 names.append(unicodedata.name(chr(c)))
3375 names.append(unicodedata.name(chr(c)))
3377 except ValueError:
3376 except ValueError:
3378 pass
3377 pass
3379 return names
3378 return names
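# Illustrative sketch: `_unicode_name_compute` just collects `unicodedata.name`
# over the given code-point ranges, skipping unnamed ones. The same routine on
# a small, arbitrarily chosen range (`unicode_names_in` is a local re-statement,
# not an import from IPython):
import unicodedata

def unicode_names_in(ranges):
    names = []
    for start, stop in ranges:
        for c in range(start, stop):
            try:
                names.append(unicodedata.name(chr(c)))
            except ValueError:  # unassigned / unnamed code point
                pass
    return names

greek = unicode_names_in([(0x3B1, 0x3CA)])  # lowercase Greek letters
assert "GREEK SMALL LETTER ALPHA" in greek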