misc fixes
M Bussonnier
@@ -1,106 +1,106
1 1 name: Run tests
2 2
3 3 on:
4 4 push:
5 5 branches:
6 6 - main
7 7 - '*.x'
8 8 pull_request:
9 9 # Run weekly on Monday at 1:23 UTC
10 10 schedule:
11 11 - cron: '23 1 * * 1'
12 12 workflow_dispatch:
13 13
14 14
15 15 jobs:
16 16 test:
17 17 runs-on: ${{ matrix.os }}
18 18 # Disable scheduled CI runs on forks
19 19 if: github.event_name != 'schedule' || github.repository_owner == 'ipython'
20 20 strategy:
21 21 fail-fast: false
22 22 matrix:
23 23 os: [ubuntu-latest, windows-latest]
24 24 python-version: ["3.11", "3.12", "3.13"]
25 25 deps: [test_extra]
26 26 # Test all on ubuntu, test ends on macos
27 27 include:
28 28 - os: macos-latest
29 29 python-version: "3.11"
30 30 deps: test_extra
31 31 # Tests minimal dependencies set
32 32 - os: ubuntu-latest
33 33 python-version: "3.11"
34 34 deps: test
35 35 # Tests latest development Python version
36 36 - os: ubuntu-latest
37 37 python-version: "3.13"
38 38 deps: test
39 39 # Installing optional dependencies takes ages on PyPy
40 - os: ubuntu-latest
41 python-version: "pypy-3.11"
42 deps: test
43 - os: windows-latest
44 python-version: "pypy-3.11"
45 deps: test
46 - os: macos-latest
47 python-version: "pypy-3.11"
48 deps: test
40 # - os: ubuntu-latest
41 # python-version: "pypy-3.11"
42 # deps: test
43 # - os: windows-latest
44 # python-version: "pypy-3.11"
45 # deps: test
46 # - os: macos-latest
47 # python-version: "pypy-3.11"
48 # deps: test
49 49 # Temporary CI run to use entry point compatible code in matplotlib-inline.
50 50 - os: ubuntu-latest
51 51 python-version: "3.12"
52 52 deps: test_extra
53 53 want-latest-entry-point-code: true
54 54
55 55 steps:
56 56 - uses: actions/checkout@v4
57 57 - name: Set up Python ${{ matrix.python-version }}
58 58 uses: actions/setup-python@v5
59 59 with:
60 60 python-version: ${{ matrix.python-version }}
61 61 cache: pip
62 62 cache-dependency-path: |
63 63 pyproject.toml
64 64 - name: Install latex
65 65 if: runner.os == 'Linux' && matrix.deps == 'test_extra'
66 66 run: echo "disable latex for now, issues in mirrors" #sudo apt-get -yq -o Acquire::Retries=3 --no-install-suggests --no-install-recommends install texlive dvipng
67 67 - name: Install and update Python dependencies (binary only)
68 68 if: ${{ ! contains( matrix.python-version, 'dev' ) }}
69 69 run: |
70 70 python -m pip install --only-binary ':all:' --upgrade pip setuptools wheel build
71 71 python -m pip install --only-binary ':all:' --no-binary curio --upgrade -e .[${{ matrix.deps }}]
72 72 python -m pip install --only-binary ':all:' --upgrade check-manifest pytest-cov 'pytest<8'
73 73 - name: Install and update Python dependencies (dev?)
74 74 if: ${{ contains( matrix.python-version, 'dev' ) }}
75 75 run: |
76 76 python -m pip install --pre --upgrade pip setuptools wheel build
77 77 python -m pip install --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple --no-binary curio --upgrade -e .[${{ matrix.deps }}]
78 78 python -m pip install --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple --upgrade check-manifest pytest-cov
79 79 - name: Try building with Python build
80 80 if: runner.os != 'Windows' # setup.py does not support sdist on Windows
81 81 run: |
82 82 python -m build
83 83 shasum -a 256 dist/*
84 84 - name: Check manifest
85 85 if: runner.os != 'Windows' # setup.py does not support sdist on Windows
86 86 run: check-manifest
87 87
88 - name: Install entry point compatible code (TEMPORARY)
88 - name: Install entry point compatible code (TEMPORARY, April 2024)
89 89 if: matrix.want-latest-entry-point-code
90 90 run: |
91 91 python -m pip list
92 92 # Not installing matplotlib's entry point code as building matplotlib from source is complex.
93 93 # Rely upon matplotlib to test all the latest entry point branches together.
94 94 python -m pip install --upgrade git+https://github.com/ipython/matplotlib-inline.git@main
95 95 python -m pip list
96 96
97 97 - name: pytest
98 98 env:
99 99 COLUMNS: 120
100 100 run: |
101 101 pytest --color=yes -raXxs ${{ startsWith(matrix.python-version, 'pypy') && ' ' || '--cov --cov-report=xml' }} --maxfail=15
102 102 - name: Upload coverage to Codecov
103 103 uses: codecov/codecov-action@v4
104 104 with:
105 105 name: Test
106 106 files: /home/runner/work/ipython/ipython/coverage.xml
@@ -1,3379 +1,3378
1 1 """Completion for IPython.
2 2
3 3 This module started as fork of the rlcompleter module in the Python standard
4 4 library. The original enhancements made to rlcompleter have been sent
5 5 upstream and were accepted as of Python 2.3,
6 6
7 7 This module now supports a wide variety of completion mechanisms, both for
8 8 normal classic Python code, as well as completers for IPython-specific
9 9 syntax like magics.
10 10
11 11 Latex and Unicode completion
12 12 ============================
13 13
14 14 IPython and compatible frontends can not only complete your code, but can also
15 15 help you input a wide range of characters. In particular, we allow you to insert
16 16 a unicode character using the tab completion mechanism.
17 17
18 18 Forward latex/unicode completion
19 19 --------------------------------
20 20
21 21 Forward completion allows you to easily type a unicode character using its latex
22 22 name or unicode long description. To do so, type a backslash followed by the
23 23 relevant name and press tab:
24 24
25 25
26 26 Using latex completion:
27 27
28 28 .. code::
29 29
30 30 \\alpha<tab>
31 31 Ξ±
32 32
33 33 or using unicode completion:
34 34
35 35
36 36 .. code::
37 37
38 38 \\GREEK SMALL LETTER ALPHA<tab>
39 39 Ξ±
40 40
41 41
42 42 Only valid Python identifiers will complete. Combining characters (like arrows or
43 43 dots) are also available, but unlike latex they need to be put after their
44 44 counterpart, that is to say, ``F\\\\vec<tab>`` is correct, not ``\\\\vec<tab>F``.
45 45
46 46 Some browsers are known to display combining characters incorrectly.
47 47
48 48 Backward latex completion
49 49 -------------------------
50 50
51 51 It is sometimes challenging to know how to type a character. If you are using
52 52 IPython, or any compatible frontend, you can prepend a backslash to the character
53 53 and press :kbd:`Tab` to expand it to its latex form.
54 54
55 55 .. code::
56 56
57 57 \\Ξ±<tab>
58 58 \\alpha
59 59
60 60
61 61 Both forward and backward completions can be deactivated by setting the
62 62 :std:configtrait:`Completer.backslash_combining_completions` option to
63 63 ``False``.
64 64
65 65
66 66 Experimental
67 67 ============
68 68
69 69 Starting with IPython 6.0, this module can make use of the Jedi library to
70 70 generate completions both by using static analysis of the code and by dynamically
71 71 inspecting multiple namespaces. Jedi is an autocompletion and static analysis
72 72 library for Python. The APIs attached to this new mechanism are unstable and will
73 73 raise unless used in a :any:`provisionalcompleter` context manager.
74 74
75 75 You will find that the following are experimental:
76 76
77 77 - :any:`provisionalcompleter`
78 78 - :any:`IPCompleter.completions`
79 79 - :any:`Completion`
80 80 - :any:`rectify_completions`
81 81
82 82 .. note::
83 83
84 84 better name for :any:`rectify_completions` ?
85 85
86 86 We welcome any feedback on these new APIs, and we also encourage you to try this
87 87 module in debug mode (start IPython with ``--Completer.debug=True``) in order
88 88 to have extra logging information if :any:`jedi` is crashing, or if the current
89 89 IPython completer pending deprecations are returning results not yet handled
90 90 by :any:`jedi`.
91 91
92 92 Using Jedi for tab completion allows snippets like the following to work without
93 93 having to execute any code:
94 94
95 95 >>> myvar = ['hello', 42]
96 96 ... myvar[1].bi<tab>
97 97
98 98 Tab completion will be able to infer that ``myvar[1]`` is an integer without
99 99 executing almost any code, unlike the deprecated :any:`IPCompleter.greedy`
100 100 option.
101 101
102 102 Be sure to update :any:`jedi` to the latest stable version or to try the
103 103 current development version to get better completions.
104 104
105 105 Matchers
106 106 ========
107 107
108 108 All completion routines are implemented using the unified *Matchers* API.
109 109 The matchers API is provisional and subject to change without notice.
110 110
111 111 The built-in matchers include:
112 112
113 113 - :any:`IPCompleter.dict_key_matcher`: dictionary key completions,
114 114 - :any:`IPCompleter.magic_matcher`: completions for magics,
115 115 - :any:`IPCompleter.unicode_name_matcher`,
116 116 :any:`IPCompleter.fwd_unicode_matcher`
117 117 and :any:`IPCompleter.latex_name_matcher`: see `Forward latex/unicode completion`_,
118 118 - :any:`back_unicode_name_matcher` and :any:`back_latex_name_matcher`: see `Backward latex completion`_,
119 119 - :any:`IPCompleter.file_matcher`: paths to files and directories,
120 120 - :any:`IPCompleter.python_func_kw_matcher` - function keywords,
121 121 - :any:`IPCompleter.python_matches` - globals and attributes (v1 API),
122 122 - ``IPCompleter.jedi_matcher`` - static analysis with Jedi,
123 123 - :any:`IPCompleter.custom_completer_matcher` - pluggable completer with a default
124 124 implementation in :any:`InteractiveShell` which uses IPython hooks system
125 125 (`complete_command`) with string dispatch (including regular expressions).
126 126 Unlike other matchers, ``custom_completer_matcher`` will not suppress
127 127 Jedi results, to match behaviour in earlier IPython versions.
128 128
129 129 Custom matchers can be added by appending to the ``IPCompleter.custom_matchers`` list.
130 130
131 131 Matcher API
132 132 -----------
133 133
134 134 Simplifying some details, the ``Matcher`` interface can be described as
135 135
136 136 .. code-block::
137 137
138 138 MatcherAPIv1 = Callable[[str], list[str]]
139 139 MatcherAPIv2 = Callable[[CompletionContext], SimpleMatcherResult]
140 140
141 141 Matcher = MatcherAPIv1 | MatcherAPIv2
142 142
143 143 The ``MatcherAPIv1`` reflects the matcher API as available prior to IPython 8.6.0
144 144 and remains supported as the simplest way of generating completions. This is also
145 145 currently the only API supported by the IPython hooks system `complete_command`.
146 146
147 147 To distinguish between matcher versions, the ``matcher_api_version`` attribute is used.
148 148 More precisely, the API allows omitting ``matcher_api_version`` for v1 Matchers,
149 149 and requires a literal ``2`` for v2 Matchers.
150 150
151 151 Once the API stabilises, future versions may relax the requirement for specifying
152 152 ``matcher_api_version`` by switching to :any:`functools.singledispatch`, therefore
153 153 please do not rely on the presence of ``matcher_api_version`` for any purposes.
154 154
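As an illustration, a minimal v2 matcher could be written and registered roughly
as in the sketch below (the ``color_matcher`` name and the completion values are
made up for this example; ``context_matcher``, ``CompletionContext``,
``SimpleCompletion`` and ``SimpleMatcherResult`` are defined in this module):

.. code-block:: python

    from IPython.core.completer import (
        CompletionContext,
        SimpleCompletion,
        SimpleMatcherResult,
        context_matcher,
    )

    @context_matcher()
    def color_matcher(context: CompletionContext) -> SimpleMatcherResult:
        # Complete a few hard-coded color names against the current token.
        colors = ["red", "green", "blue"]
        return {
            "completions": [
                SimpleCompletion(text=color, type="param")
                for color in colors
                if color.startswith(context.token)
            ]
        }

    # For example, from a startup file, append it to the custom matchers list:
    # get_ipython().Completer.custom_matchers.append(color_matcher)
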
155 155 Suppression of competing matchers
156 156 ---------------------------------
157 157
158 158 By default results from all matchers are combined, in the order determined by
159 159 their priority. Matchers can request to suppress results from subsequent
160 160 matchers by setting ``suppress`` to ``True`` in the ``MatcherResult``.
161 161
162 162 When multiple matchers simultaneously request suppression, the results from
163 163 the matcher with the highest priority will be returned.
164 164
165 165 Sometimes it is desirable to suppress most but not all other matchers;
166 166 this can be achieved by adding a set of identifiers of matchers which
167 167 should not be suppressed to the ``MatcherResult`` under the ``do_not_suppress`` key.
168 168
169 169 The suppression behaviour is user-configurable via
170 170 :std:configtrait:`IPCompleter.suppress_competing_matchers`.
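
For instance, a v2 matcher that wants its results to be shown alone, except for
those of the Jedi matcher, could return something along these lines (a sketch;
the identifier string follows the naming used in the list of built-in matchers
above):

.. code-block:: python

    return {
        "completions": [SimpleCompletion(text="%mymagic", type="magic")],
        # hide results from all other matchers ...
        "suppress": True,
        # ... except the Jedi matcher
        "do_not_suppress": {"IPCompleter.jedi_matcher"},
    }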
171 171 """
172 172
173 173
174 174 # Copyright (c) IPython Development Team.
175 175 # Distributed under the terms of the Modified BSD License.
176 176 #
177 177 # Some of this code originated from rlcompleter in the Python standard library
178 178 # Copyright (C) 2001 Python Software Foundation, www.python.org
179 179
180 180 from __future__ import annotations
181 181 import builtins as builtin_mod
182 182 import enum
183 183 import glob
184 184 import inspect
185 185 import itertools
186 186 import keyword
187 187 import os
188 188 import re
189 189 import string
190 190 import sys
191 191 import tokenize
192 192 import time
193 193 import unicodedata
194 194 import uuid
195 195 import warnings
196 196 from ast import literal_eval
197 197 from collections import defaultdict
198 198 from contextlib import contextmanager
199 199 from dataclasses import dataclass
200 200 from functools import cached_property, partial
201 201 from types import SimpleNamespace
202 202 from typing import (
203 203 Iterable,
204 204 Iterator,
205 205 List,
206 206 Tuple,
207 207 Union,
208 208 Any,
209 209 Sequence,
210 210 Dict,
211 211 Optional,
212 212 TYPE_CHECKING,
213 213 Set,
214 214 Sized,
215 215 TypeVar,
216 216 Literal,
217 217 )
218 218
219 219 from IPython.core.guarded_eval import guarded_eval, EvaluationContext
220 220 from IPython.core.error import TryNext
221 221 from IPython.core.inputtransformer2 import ESC_MAGIC
222 222 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
223 223 from IPython.core.oinspect import InspectColors
224 224 from IPython.testing.skipdoctest import skip_doctest
225 225 from IPython.utils import generics
226 226 from IPython.utils.decorators import sphinx_options
227 227 from IPython.utils.dir2 import dir2, get_real_method
228 228 from IPython.utils.docs import GENERATING_DOCUMENTATION
229 229 from IPython.utils.path import ensure_dir_exists
230 230 from IPython.utils.process import arg_split
231 231 from traitlets import (
232 232 Bool,
233 233 Enum,
234 234 Int,
235 235 List as ListTrait,
236 236 Unicode,
237 237 Dict as DictTrait,
238 238 Union as UnionTrait,
239 239 observe,
240 240 )
241 241 from traitlets.config.configurable import Configurable
242 242
243 243 import __main__
244 244
245 245 from typing import cast
246 246
247 247 if sys.version_info < (3, 12):
248 248 from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias, TypeGuard
249 249 else:
250 250 from typing import TypedDict, NotRequired, Protocol, TypeAlias, TypeGuard
251 251
252 252
253 253 # skip module docstests
254 254 __skip_doctest__ = True
255 255
256 256
257 257 try:
258 258 import jedi
259 259 jedi.settings.case_insensitive_completion = False
260 260 import jedi.api.helpers
261 261 import jedi.api.classes
262 262 JEDI_INSTALLED = True
263 263 except ImportError:
264 264 JEDI_INSTALLED = False
265 265
266 266
267
268 267 # -----------------------------------------------------------------------------
269 268 # Globals
270 269 #-----------------------------------------------------------------------------
271 270
272 271 # Ranges where we have most of the valid unicode names. We could be finer
273 272 # grained, but is it worth it for performance? While unicode has characters in the
274 273 # range 0-0x110000, we seem to have names for only about 10% of those (131808 as I
275 274 # write this). With the ranges below we cover them all, with a density of ~67%;
276 275 # the biggest next gap we could consider only adds about 1% density and there are 600
277 276 # gaps that would need hard coding.
278 277 _UNICODE_RANGES = [(32, 0x323B0), (0xE0001, 0xE01F0)]
279 278
280 279 # Public API
281 280 __all__ = ["Completer", "IPCompleter"]
282 281
283 282 if sys.platform == 'win32':
284 283 PROTECTABLES = ' '
285 284 else:
286 285 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
287 286
288 287 # Protect against returning an enormous number of completions which the frontend
289 288 # may have trouble processing.
290 289 MATCHES_LIMIT = 500
291 290
292 291 # Completion type reported when no type can be inferred.
293 292 _UNKNOWN_TYPE = "<unknown>"
294 293
295 294 # sentinel value to signal lack of a match
296 295 not_found = object()
297 296
298 297 class ProvisionalCompleterWarning(FutureWarning):
299 298 """
300 299 Exception raised by an experimental feature in this module.
301 300
302 301 Wrap code in :any:`provisionalcompleter` context manager if you
303 302 are certain you want to use an unstable feature.
304 303 """
305 304 pass
306 305
307 306 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
308 307
309 308
310 309 @skip_doctest
311 310 @contextmanager
312 311 def provisionalcompleter(action='ignore'):
313 312 """
314 313 This context manager has to be used in any place where unstable completer
315 314 behavior and API may be called.
316 315
317 316 >>> with provisionalcompleter():
318 317 ... completer.do_experimental_things() # works
319 318
320 319 >>> completer.do_experimental_things() # raises.
321 320
322 321 .. note::
323 322
324 323 Unstable
325 324
326 325 By using this context manager you agree that the API in use may change
327 326 without warning, and that you won't complain if it does so.
328 327
329 328 You also understand that, if the API is not to your liking, you should report
330 329 a bug to explain your use case upstream.
331 330
332 331 We'll be happy to get your feedback, feature requests, and improvements on
333 332 any of the unstable APIs!
334 333 """
335 334 with warnings.catch_warnings():
336 335 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
337 336 yield
338 337
339 338
340 339 def has_open_quotes(s):
341 340 """Return whether a string has open quotes.
342 341
343 342 This simply counts whether the number of quote characters of either type in
344 343 the string is odd.
345 344
346 345 Returns
347 346 -------
348 347 If there is an open quote, the quote character is returned. Else, return
349 348 False.
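
Examples
--------
Illustrative calls (not run as doctests in this module)::

    has_open_quotes('print("hello')   # -> '"'
    has_open_quotes("it's")           # -> "'"
    has_open_quotes('all closed')     # -> False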
350 349 """
351 350 # We check " first, then ', so complex cases with nested quotes will get
352 351 # the " to take precedence.
353 352 if s.count('"') % 2:
354 353 return '"'
355 354 elif s.count("'") % 2:
356 355 return "'"
357 356 else:
358 357 return False
359 358
360 359
361 360 def protect_filename(s, protectables=PROTECTABLES):
362 361 """Escape a string to protect certain characters."""
363 362 if set(s) & set(protectables):
364 363 if sys.platform == "win32":
365 364 return '"' + s + '"'
366 365 else:
367 366 return "".join(("\\" + c if c in protectables else c) for c in s)
368 367 else:
369 368 return s
370 369
371 370
372 371 def expand_user(path:str) -> Tuple[str, bool, str]:
373 372 """Expand ``~``-style usernames in strings.
374 373
375 374 This is similar to :func:`os.path.expanduser`, but it computes and returns
376 375 extra information that will be useful if the input was being used in
377 376 computing completions, and you wish to return the completions with the
378 377 original '~' instead of its expanded value.
379 378
380 379 Parameters
381 380 ----------
382 381 path : str
383 382 String to be expanded. If no ~ is present, the output is the same as the
384 383 input.
385 384
386 385 Returns
387 386 -------
388 387 newpath : str
389 388 Result of ~ expansion in the input path.
390 389 tilde_expand : bool
391 390 Whether any expansion was performed or not.
392 391 tilde_val : str
393 392 The value that ~ was replaced with.
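
Examples
--------
Illustrative call, assuming the home directory is ``/home/user``::

    expand_user('~/rest')
    # -> ('/home/user/rest', True, '/home/user')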
394 393 """
395 394 # Default values
396 395 tilde_expand = False
397 396 tilde_val = ''
398 397 newpath = path
399 398
400 399 if path.startswith('~'):
401 400 tilde_expand = True
402 401 rest = len(path)-1
403 402 newpath = os.path.expanduser(path)
404 403 if rest:
405 404 tilde_val = newpath[:-rest]
406 405 else:
407 406 tilde_val = newpath
408 407
409 408 return newpath, tilde_expand, tilde_val
410 409
411 410
412 411 def compress_user(path:str, tilde_expand:bool, tilde_val:str) -> str:
413 412 """Does the opposite of expand_user, with its outputs.
414 413 """
415 414 if tilde_expand:
416 415 return path.replace(tilde_val, '~')
417 416 else:
418 417 return path
419 418
420 419
421 420 def completions_sorting_key(word):
422 421 """key for sorting completions
423 422
424 423 This does several things:
425 424
426 425 - Demote any completions starting with underscores to the end
427 426 - Insert any %magic and %%cellmagic completions in the alphabetical order
428 427 by their name
429 428 """
430 429 prio1, prio2 = 0, 0
431 430
432 431 if word.startswith('__'):
433 432 prio1 = 2
434 433 elif word.startswith('_'):
435 434 prio1 = 1
436 435
437 436 if word.endswith('='):
438 437 prio1 = -1
439 438
440 439 if word.startswith('%%'):
441 440 # If there's another % in there, this is something else, so leave it alone
442 441 if not "%" in word[2:]:
443 442 word = word[2:]
444 443 prio2 = 2
445 444 elif word.startswith('%'):
446 445 if not "%" in word[1:]:
447 446 word = word[1:]
448 447 prio2 = 1
449 448
450 449 return prio1, word, prio2
451 450
452 451
453 452 class _FakeJediCompletion:
454 453 """
455 454 This is a workaround to communicate to the UI that Jedi has crashed and to
456 455 report a bug. Will be used only if :any:`IPCompleter.debug` is set to true.
457 456
458 457 Added in IPython 6.0 so should likely be removed for 7.0
459 458
460 459 """
461 460
462 461 def __init__(self, name):
463 462
464 463 self.name = name
465 464 self.complete = name
466 465 self.type = 'crashed'
467 466 self.name_with_symbols = name
468 467 self.signature = ""
469 468 self._origin = "fake"
470 469 self.text = "crashed"
471 470
472 471 def __repr__(self):
473 472 return '<Fake completion object jedi has crashed>'
474 473
475 474
476 475 _JediCompletionLike = Union["jedi.api.Completion", _FakeJediCompletion]
477 476
478 477
479 478 class Completion:
480 479 """
481 480 Completion object used and returned by IPython completers.
482 481
483 482 .. warning::
484 483
485 484 Unstable
486 485
487 486 This class is unstable, the API may change without warning.
488 487 It will also raise unless used in the proper context manager.
489 488 
490 489 This acts as a middle ground :any:`Completion` object between the
491 490 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
492 491 object. While Jedi needs a lot of information about the evaluator and how the
493 492 code should be run/inspected, PromptToolkit (and other frontends) mostly
494 493 need user-facing information:
495 494 
496 495 - Which range should be replaced by what.
497 496 - Some metadata (like the completion type), or meta-information to be displayed to
498 497 the user.
499 498
500 499 For debugging purposes we can also store the origin of the completion (``jedi``,
501 500 ``IPython.python_matches``, ``IPython.magics_matches``...).
502 501 """
503 502
504 503 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
505 504
506 505 def __init__(
507 506 self,
508 507 start: int,
509 508 end: int,
510 509 text: str,
511 510 *,
512 511 type: Optional[str] = None,
513 512 _origin="",
514 513 signature="",
515 514 ) -> None:
516 515 warnings.warn(
517 516 "``Completion`` is a provisional API (as of IPython 6.0). "
518 517 "It may change without warnings. "
519 518 "Use in corresponding context manager.",
520 519 category=ProvisionalCompleterWarning,
521 520 stacklevel=2,
522 521 )
523 522
524 523 self.start = start
525 524 self.end = end
526 525 self.text = text
527 526 self.type = type
528 527 self.signature = signature
529 528 self._origin = _origin
530 529
531 530 def __repr__(self):
532 531 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
533 532 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
534 533
535 534 def __eq__(self, other) -> bool:
536 535 """
537 536 Equality and hash do not hash the type (as some completers may not be
538 537 able to infer the type), but are used to (partially) de-duplicate
539 538 completions.
540 539 
541 540 Completely de-duplicating completions is a bit trickier than just
542 541 comparing, as it depends on surrounding text, which Completions are not
543 542 aware of.
544 543 """
545 544 return self.start == other.start and \
546 545 self.end == other.end and \
547 546 self.text == other.text
548 547
549 548 def __hash__(self):
550 549 return hash((self.start, self.end, self.text))
551 550
552 551
553 552 class SimpleCompletion:
554 553 """Completion item to be included in the dictionary returned by new-style Matcher (API v2).
555 554
556 555 .. warning::
557 556
558 557 Provisional
559 558
560 559 This class is used to describe the currently supported attributes of
561 560 simple completion items, and any additional implementation details
562 561 should not be relied on. Additional attributes may be included in
563 562 future versions, and meaning of text disambiguated from the current
564 563 dual meaning of "text to insert" and "text to used as a label".
565 564 """
566 565
567 566 __slots__ = ["text", "type"]
568 567
569 568 def __init__(self, text: str, *, type: Optional[str] = None):
570 569 self.text = text
571 570 self.type = type
572 571
573 572 def __repr__(self):
574 573 return f"<SimpleCompletion text={self.text!r} type={self.type!r}>"
575 574
576 575
577 576 class _MatcherResultBase(TypedDict):
578 577 """Definition of dictionary to be returned by new-style Matcher (API v2)."""
579 578
580 579 #: Suffix of the provided ``CompletionContext.token``, if not given defaults to full token.
581 580 matched_fragment: NotRequired[str]
582 581
583 582 #: Whether to suppress results from all other matchers (True), some
584 583 #: matchers (set of identifiers) or none (False); default is False.
585 584 suppress: NotRequired[Union[bool, Set[str]]]
586 585
587 586 #: Identifiers of matchers which should NOT be suppressed when this matcher
588 587 #: requests to suppress all other matchers; defaults to an empty set.
589 588 do_not_suppress: NotRequired[Set[str]]
590 589
591 590 #: Are completions already ordered and should be left as-is? default is False.
592 591 ordered: NotRequired[bool]
593 592
594 593
595 594 @sphinx_options(show_inherited_members=True, exclude_inherited_from=["dict"])
596 595 class SimpleMatcherResult(_MatcherResultBase, TypedDict):
597 596 """Result of new-style completion matcher."""
598 597
599 598 # note: TypedDict is added again to the inheritance chain
600 599 # in order to get __orig_bases__ for documentation
601 600
602 601 #: List of candidate completions
603 602 completions: Sequence[SimpleCompletion] | Iterator[SimpleCompletion]
604 603
605 604
606 605 class _JediMatcherResult(_MatcherResultBase):
607 606 """Matching result returned by Jedi (will be processed differently)"""
608 607
609 608 #: list of candidate completions
610 609 completions: Iterator[_JediCompletionLike]
611 610
612 611
613 612 AnyMatcherCompletion = Union[_JediCompletionLike, SimpleCompletion]
614 613 AnyCompletion = TypeVar("AnyCompletion", AnyMatcherCompletion, Completion)
615 614
616 615
617 616 @dataclass
618 617 class CompletionContext:
619 618 """Completion context provided as an argument to matchers in the Matcher API v2."""
620 619
621 620 # rationale: many legacy matchers relied on completer state (`self.text_until_cursor`)
622 621 # which was not explicitly visible as an argument of the matcher, making any refactor
623 622 # prone to errors; by explicitly passing `cursor_position` we can decouple the matchers
624 623 # from the completer, and make substituting them in sub-classes easier.
625 624
626 625 #: Relevant fragment of code directly preceding the cursor.
627 626 #: The extraction of token is implemented via splitter heuristic
628 627 #: (following readline behaviour for legacy reasons), which is user configurable
629 628 #: (by switching the greedy mode).
630 629 token: str
631 630
632 631 #: The full available content of the editor or buffer
633 632 full_text: str
634 633
635 634 #: Cursor position in the line (the same for ``full_text`` and ``text``).
636 635 cursor_position: int
637 636
638 637 #: Cursor line in ``full_text``.
639 638 cursor_line: int
640 639
641 640 #: The maximum number of completions that will be used downstream.
642 641 #: Matchers can use this information to abort early.
643 642 #: The built-in Jedi matcher is currently excepted from this limit.
644 643 # If not given, return all possible completions.
645 644 limit: Optional[int]
646 645
647 646 @cached_property
648 647 def text_until_cursor(self) -> str:
649 648 return self.line_with_cursor[: self.cursor_position]
650 649
651 650 @cached_property
652 651 def line_with_cursor(self) -> str:
653 652 return self.full_text.split("\n")[self.cursor_line]
654 653
655 654
656 655 #: Matcher results for API v2.
657 656 MatcherResult = Union[SimpleMatcherResult, _JediMatcherResult]
658 657
659 658
660 659 class _MatcherAPIv1Base(Protocol):
661 660 def __call__(self, text: str) -> List[str]:
662 661 """Call signature."""
663 662 ...
664 663
665 664 #: Used to construct the default matcher identifier
666 665 __qualname__: str
667 666
668 667
669 668 class _MatcherAPIv1Total(_MatcherAPIv1Base, Protocol):
670 669 #: API version
671 670 matcher_api_version: Optional[Literal[1]]
672 671
673 672 def __call__(self, text: str) -> List[str]:
674 673 """Call signature."""
675 674 ...
676 675
677 676
678 677 #: Protocol describing Matcher API v1.
679 678 MatcherAPIv1: TypeAlias = Union[_MatcherAPIv1Base, _MatcherAPIv1Total]
680 679
681 680
682 681 class MatcherAPIv2(Protocol):
683 682 """Protocol describing Matcher API v2."""
684 683
685 684 #: API version
686 685 matcher_api_version: Literal[2] = 2
687 686
688 687 def __call__(self, context: CompletionContext) -> MatcherResult:
689 688 """Call signature."""
690 689 ...
691 690
692 691 #: Used to construct the default matcher identifier
693 692 __qualname__: str
694 693
695 694
696 695 Matcher: TypeAlias = Union[MatcherAPIv1, MatcherAPIv2]
697 696
698 697
699 698 def _is_matcher_v1(matcher: Matcher) -> TypeGuard[MatcherAPIv1]:
700 699 api_version = _get_matcher_api_version(matcher)
701 700 return api_version == 1
702 701
703 702
704 703 def _is_matcher_v2(matcher: Matcher) -> TypeGuard[MatcherAPIv2]:
705 704 api_version = _get_matcher_api_version(matcher)
706 705 return api_version == 2
707 706
708 707
709 708 def _is_sizable(value: Any) -> TypeGuard[Sized]:
710 709 """Determines whether objects is sizable"""
711 710 return hasattr(value, "__len__")
712 711
713 712
714 713 def _is_iterator(value: Any) -> TypeGuard[Iterator]:
715 714 """Determines whether objects is sizable"""
716 715 return hasattr(value, "__next__")
717 716
718 717
719 718 def has_any_completions(result: MatcherResult) -> bool:
720 719 """Check if any result includes any completions."""
721 720 completions = result["completions"]
722 721 if _is_sizable(completions):
723 722 return len(completions) != 0
724 723 if _is_iterator(completions):
725 724 try:
726 725 old_iterator = completions
727 726 first = next(old_iterator)
728 727 result["completions"] = cast(
729 728 Iterator[SimpleCompletion],
730 729 itertools.chain([first], old_iterator),
731 730 )
732 731 return True
733 732 except StopIteration:
734 733 return False
735 734 raise ValueError(
736 735 "Completions returned by matcher need to be an Iterator or a Sizable"
737 736 )
738 737
739 738
740 739 def completion_matcher(
741 740 *,
742 741 priority: Optional[float] = None,
743 742 identifier: Optional[str] = None,
744 743 api_version: int = 1,
745 744 ):
746 745 """Adds attributes describing the matcher.
747 746
748 747 Parameters
749 748 ----------
750 749 priority : Optional[float]
751 750 The priority of the matcher, determines the order of execution of matchers.
752 751 Higher priority means that the matcher will be executed first. Defaults to 0.
753 752 identifier : Optional[str]
754 753 identifier of the matcher allowing users to modify the behaviour via traitlets,
755 754 and also used for debugging (will be passed as ``origin`` with the completions).
756 755
757 756 Defaults to the matcher function's ``__qualname__`` (for example,
758 757 ``IPCompleter.file_matcher`` for the built-in matcher defined
759 758 as a ``file_matcher`` method of the ``IPCompleter`` class).
760 759 api_version: Optional[int]
761 760 version of the Matcher API used by this matcher.
762 761 Currently supported values are 1 and 2.
763 762 Defaults to 1.
764 763 """
765 764
766 765 def wrapper(func: Matcher):
767 766 func.matcher_priority = priority or 0 # type: ignore
768 767 func.matcher_identifier = identifier or func.__qualname__ # type: ignore
769 768 func.matcher_api_version = api_version # type: ignore
770 769 if TYPE_CHECKING:
771 770 if api_version == 1:
772 771 func = cast(MatcherAPIv1, func)
773 772 elif api_version == 2:
774 773 func = cast(MatcherAPIv2, func)
775 774 return func
776 775
777 776 return wrapper
778 777
779 778
780 779 def _get_matcher_priority(matcher: Matcher):
781 780 return getattr(matcher, "matcher_priority", 0)
782 781
783 782
784 783 def _get_matcher_id(matcher: Matcher):
785 784 return getattr(matcher, "matcher_identifier", matcher.__qualname__)
786 785
787 786
788 787 def _get_matcher_api_version(matcher):
789 788 return getattr(matcher, "matcher_api_version", 1)
790 789
791 790
792 791 context_matcher = partial(completion_matcher, api_version=2)
793 792
794 793
795 794 _IC = Iterable[Completion]
796 795
797 796
798 797 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
799 798 """
800 799 Deduplicate a set of completions.
801 800
802 801 .. warning::
803 802
804 803 Unstable
805 804
806 805 This function is unstable, API may change without warning.
807 806
808 807 Parameters
809 808 ----------
810 809 text : str
811 810 text that should be completed.
812 811 completions : Iterator[Completion]
813 812 iterator over the completions to deduplicate
814 813
815 814 Yields
816 815 ------
817 816 `Completions` objects
818 817 Completions coming from multiple sources may be different but end up having
819 818 the same effect when applied to ``text``. If this is the case, this will
820 819 consider the completions as equal and only emit the first one encountered.
821 820 Not folded into `completions()` yet, for debugging purposes and to detect when
822 821 the IPython completer returns things that Jedi does not, but it should be
823 822 at some point.
824 823 """
825 824 completions = list(completions)
826 825 if not completions:
827 826 return
828 827
829 828 new_start = min(c.start for c in completions)
830 829 new_end = max(c.end for c in completions)
831 830
832 831 seen = set()
833 832 for c in completions:
834 833 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
835 834 if new_text not in seen:
836 835 yield c
837 836 seen.add(new_text)
838 837
839 838
840 839 def rectify_completions(text: str, completions: _IC, *, _debug: bool = False) -> _IC:
841 840 """
842 841 Rectify a set of completions to all have the same ``start`` and ``end``
843 842
844 843 .. warning::
845 844
846 845 Unstable
847 846
848 847 This function is unstable, API may change without warning.
848 847 It will also raise unless used in the proper context manager.
850 849
851 850 Parameters
852 851 ----------
853 852 text : str
854 853 text that should be completed.
855 854 completions : Iterator[Completion]
856 855 iterator over the completions to rectify
857 856 _debug : bool
858 857 Log failed completion
859 858
860 859 Notes
861 860 -----
862 861 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
863 862 the Jupyter Protocol requires them to. This will readjust
864 863 the completions to have the same ``start`` and ``end`` by padding both
865 864 extremities with surrounding text.
866 865 
867 866 During stabilisation this should support a ``_debug`` option to log which
868 867 completions are returned by the IPython completer and not found in Jedi, in
869 868 order to make upstream bug reports.
870 869 """
871 870 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
872 871 "It may change without warnings. "
873 872 "Use in corresponding context manager.",
874 873 category=ProvisionalCompleterWarning, stacklevel=2)
875 874
876 875 completions = list(completions)
877 876 if not completions:
878 877 return
879 878 starts = (c.start for c in completions)
880 879 ends = (c.end for c in completions)
881 880
882 881 new_start = min(starts)
883 882 new_end = max(ends)
884 883
885 884 seen_jedi = set()
886 885 seen_python_matches = set()
887 886 for c in completions:
888 887 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
889 888 if c._origin == 'jedi':
890 889 seen_jedi.add(new_text)
891 890 elif c._origin == "IPCompleter.python_matcher":
892 891 seen_python_matches.add(new_text)
893 892 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
894 893 diff = seen_python_matches.difference(seen_jedi)
895 894 if diff and _debug:
896 895 print('IPython.python matches have extras:', diff)
897 896
898 897
899 898 if sys.platform == 'win32':
900 899 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
901 900 else:
902 901 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
903 902
904 903 GREEDY_DELIMS = ' =\r\n'
905 904
906 905
907 906 class CompletionSplitter(object):
908 907 """An object to split an input line in a manner similar to readline.
909 908
910 909 By having our own implementation, we can expose readline-like completion in
911 910 a uniform manner to all frontends. This object only needs to be given the
912 911 line of text to be split and the cursor position on said line, and it
913 912 returns the 'word' to be completed on at the cursor after splitting the
914 913 entire line.
915 914
916 915 What characters are used as splitting delimiters can be controlled by
917 916 setting the ``delims`` attribute (this is a property that internally
918 917 automatically builds the necessary regular expression)"""
919 918
920 919 # Private interface
921 920
922 921 # A string of delimiter characters. The default value makes sense for
923 922 # IPython's most typical usage patterns.
924 923 _delims = DELIMS
925 924
926 925 # The expression (a normal string) to be compiled into a regular expression
927 926 # for actual splitting. We store it as an attribute mostly for ease of
928 927 # debugging, since this type of code can be so tricky to debug.
929 928 _delim_expr = None
930 929
931 930 # The regular expression that does the actual splitting
932 931 _delim_re = None
933 932
934 933 def __init__(self, delims=None):
935 934 delims = CompletionSplitter._delims if delims is None else delims
936 935 self.delims = delims
937 936
938 937 @property
939 938 def delims(self):
940 939 """Return the string of delimiter characters."""
941 940 return self._delims
942 941
943 942 @delims.setter
944 943 def delims(self, delims):
945 944 """Set the delimiters for line splitting."""
946 945 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
947 946 self._delim_re = re.compile(expr)
948 947 self._delims = delims
949 948 self._delim_expr = expr
950 949
951 950 def split_line(self, line, cursor_pos=None):
952 951 """Split a line of text with a cursor at the given position.
953 952 """
954 953 l = line if cursor_pos is None else line[:cursor_pos]
955 954 return self._delim_re.split(l)[-1]
956 955
957 956
958 957
959 958 class Completer(Configurable):
960 959
961 960 greedy = Bool(
962 961 False,
963 962 help="""Activate greedy completion.
964 963
965 964 .. deprecated:: 8.8
966 965 Use :std:configtrait:`Completer.evaluation` and :std:configtrait:`Completer.auto_close_dict_keys` instead.
967 966
968 967 When enabled in IPython 8.8 or newer, changes configuration as follows:
969 968
970 969 - ``Completer.evaluation = 'unsafe'``
971 970 - ``Completer.auto_close_dict_keys = True``
972 971 """,
973 972 ).tag(config=True)
974 973
975 974 evaluation = Enum(
976 975 ("forbidden", "minimal", "limited", "unsafe", "dangerous"),
977 976 default_value="limited",
978 977 help="""Policy for code evaluation under completion.
979 978
980 979 Successive options allow enabling more eager evaluation for better
981 980 completion suggestions, including for nested dictionaries, nested lists,
982 981 or even results of function calls.
983 982 Setting ``unsafe`` or higher can lead to evaluation of arbitrary user
984 983 code on :kbd:`Tab` with potentially unwanted or dangerous side effects.
985 984
986 985 Allowed values are:
987 986
988 987 - ``forbidden``: no evaluation of code is permitted,
989 988 - ``minimal``: evaluation of literals and access to built-in namespace;
990 989 no item/attribute evaluation, no access to locals/globals,
991 990 no evaluation of any operations or comparisons.
992 991 - ``limited``: access to all namespaces, evaluation of hard-coded methods
993 992 (for example: :any:`dict.keys`, :any:`object.__getattr__`,
994 993 :any:`object.__getitem__`) on allow-listed objects (for example:
995 994 :any:`dict`, :any:`list`, :any:`tuple`, ``pandas.Series``),
996 995 - ``unsafe``: evaluation of all methods and function calls but not of
997 996 syntax with side-effects like `del x`,
998 997 - ``dangerous``: completely arbitrary evaluation.
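
For example, to restrict evaluation from an ``ipython_config.py`` file
(illustrative)::

    c.IPCompleter.evaluation = "minimal"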
999 998 """,
1000 999 ).tag(config=True)
1001 1000
1002 1001 use_jedi = Bool(default_value=JEDI_INSTALLED,
1003 1002 help="Experimental: Use Jedi to generate autocompletions. "
1004 1003 "Default to True if jedi is installed.").tag(config=True)
1005 1004
1006 1005 jedi_compute_type_timeout = Int(default_value=400,
1007 1006 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
1008 1007 Set to 0 to stop computing types. Non-zero value lower than 100ms may hurt
1009 1008 performance by preventing jedi from building its cache.
1010 1009 """).tag(config=True)
1011 1010
1012 1011 debug = Bool(default_value=False,
1013 1012 help='Enable debug for the Completer. Mostly print extra '
1014 1013 'information for experimental jedi integration.')\
1015 1014 .tag(config=True)
1016 1015
1017 1016 backslash_combining_completions = Bool(True,
1018 1017 help="Enable unicode completions, e.g. \\alpha<tab> . "
1019 1018 "Includes completion of latex commands, unicode names, and expanding "
1020 1019 "unicode characters back to latex commands.").tag(config=True)
1021 1020
1022 1021 auto_close_dict_keys = Bool(
1023 1022 False,
1024 1023 help="""
1025 1024 Enable auto-closing dictionary keys.
1026 1025
1027 1026 When enabled, string keys will be suffixed with a final quote
1028 1027 (matching the opening quote), tuple keys will also receive a
1029 1028 separating comma if needed, and keys which are final will
1030 1029 receive a closing bracket (``]``).
1031 1030 """,
1032 1031 ).tag(config=True)
1033 1032
1034 1033 def __init__(self, namespace=None, global_namespace=None, **kwargs):
1035 1034 """Create a new completer for the command line.
1036 1035
1037 1036 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
1038 1037
1039 1038 If unspecified, the default namespace where completions are performed
1040 1039 is __main__ (technically, __main__.__dict__). Namespaces should be
1041 1040 given as dictionaries.
1042 1041
1043 1042 An optional second namespace can be given. This allows the completer
1044 1043 to handle cases where both the local and global scopes need to be
1045 1044 distinguished.
1046 1045 """
1047 1046
1048 1047 # Don't bind to namespace quite yet, but flag whether the user wants a
1049 1048 # specific namespace or to use __main__.__dict__. This will allow us
1050 1049 # to bind to __main__.__dict__ at completion time, not now.
1051 1050 if namespace is None:
1052 1051 self.use_main_ns = True
1053 1052 else:
1054 1053 self.use_main_ns = False
1055 1054 self.namespace = namespace
1056 1055
1057 1056 # The global namespace, if given, can be bound directly
1058 1057 if global_namespace is None:
1059 1058 self.global_namespace = {}
1060 1059 else:
1061 1060 self.global_namespace = global_namespace
1062 1061
1063 1062 self.custom_matchers = []
1064 1063
1065 1064 super(Completer, self).__init__(**kwargs)
1066 1065
1067 1066 def complete(self, text, state):
1068 1067 """Return the next possible completion for 'text'.
1069 1068
1070 1069 This is called successively with state == 0, 1, 2, ... until it
1071 1070 returns None. The completion should begin with 'text'.
1072 1071
1073 1072 """
1074 1073 if self.use_main_ns:
1075 1074 self.namespace = __main__.__dict__
1076 1075
1077 1076 if state == 0:
1078 1077 if "." in text:
1079 1078 self.matches = self.attr_matches(text)
1080 1079 else:
1081 1080 self.matches = self.global_matches(text)
1082 1081 try:
1083 1082 return self.matches[state]
1084 1083 except IndexError:
1085 1084 return None
1086 1085
1087 1086 def global_matches(self, text):
1088 1087 """Compute matches when text is a simple name.
1089 1088
1090 1089 Return a list of all keywords, built-in functions and names currently
1091 1090 defined in self.namespace or self.global_namespace that match.
1092 1091
1093 1092 """
1094 1093 matches = []
1095 1094 match_append = matches.append
1096 1095 n = len(text)
1097 1096 for lst in [
1098 1097 keyword.kwlist,
1099 1098 builtin_mod.__dict__.keys(),
1100 1099 list(self.namespace.keys()),
1101 1100 list(self.global_namespace.keys()),
1102 1101 ]:
1103 1102 for word in lst:
1104 1103 if word[:n] == text and word != "__builtins__":
1105 1104 match_append(word)
1106 1105
1107 1106 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
1108 1107 for lst in [list(self.namespace.keys()), list(self.global_namespace.keys())]:
1109 1108 shortened = {
1110 1109 "_".join([sub[0] for sub in word.split("_")]): word
1111 1110 for word in lst
1112 1111 if snake_case_re.match(word)
1113 1112 }
1114 1113 for word in shortened.keys():
1115 1114 if word[:n] == text and word != "__builtins__":
1116 1115 match_append(shortened[word])
1117 1116 return matches
1118 1117
1119 1118 def attr_matches(self, text):
1120 1119 """Compute matches when text contains a dot.
1121 1120
1122 1121 Assuming the text is of the form NAME.NAME....[NAME], and is
1123 1122 evaluatable in self.namespace or self.global_namespace, it will be
1124 1123 evaluated and its attributes (as revealed by dir()) are used as
1125 1124 possible completions. (For class instances, class members are
1126 1125 also considered.)
1127 1126
1128 1127 WARNING: this can still invoke arbitrary C code, if an object
1129 1128 with a __getattr__ hook is evaluated.
1130 1129
1131 1130 """
1132 1131 return self._attr_matches(text)[0]
1133 1132
1134 1133 def _attr_matches(self, text, include_prefix=True) -> Tuple[Sequence[str], str]:
1135 1134 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
1136 1135 if not m2:
1137 1136 return [], ""
1138 1137 expr, attr = m2.group(1, 2)
1139 1138
1140 1139 obj = self._evaluate_expr(expr)
1141 1140
1142 1141 if obj is not_found:
1143 1142 return [], ""
1144 1143
1145 1144 if self.limit_to__all__ and hasattr(obj, '__all__'):
1146 1145 words = get__all__entries(obj)
1147 1146 else:
1148 1147 words = dir2(obj)
1149 1148
1150 1149 try:
1151 1150 words = generics.complete_object(obj, words)
1152 1151 except TryNext:
1153 1152 pass
1154 1153 except AssertionError:
1155 1154 raise
1156 1155 except Exception:
1157 1156 # Silence errors from completion function
1158 1157 pass
1159 1158 # Build match list to return
1160 1159 n = len(attr)
1161 1160
1162 1161 # Note: ideally we would just return words here and the prefix
1163 1162 # reconciliator would know that we intend to append to rather than
1164 1163 # replace the input text; this requires refactoring to return range
1165 1164 # which ought to be replaced (as does jedi).
1166 1165 if include_prefix:
1167 1166 tokens = _parse_tokens(expr)
1168 1167 rev_tokens = reversed(tokens)
1169 1168 skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
1170 1169 name_turn = True
1171 1170
1172 1171 parts = []
1173 1172 for token in rev_tokens:
1174 1173 if token.type in skip_over:
1175 1174 continue
1176 1175 if token.type == tokenize.NAME and name_turn:
1177 1176 parts.append(token.string)
1178 1177 name_turn = False
1179 1178 elif (
1180 1179 token.type == tokenize.OP and token.string == "." and not name_turn
1181 1180 ):
1182 1181 parts.append(token.string)
1183 1182 name_turn = True
1184 1183 else:
1185 1184 # short-circuit if not empty nor name token
1186 1185 break
1187 1186
1188 1187 prefix_after_space = "".join(reversed(parts))
1189 1188 else:
1190 1189 prefix_after_space = ""
1191 1190
1192 1191 return (
1193 1192 ["%s.%s" % (prefix_after_space, w) for w in words if w[:n] == attr],
1194 1193 "." + attr,
1195 1194 )
1196 1195
1197 1196 def _evaluate_expr(self, expr):
1198 1197 obj = not_found
1199 1198 done = False
1200 1199 while not done and expr:
1201 1200 try:
1202 1201 obj = guarded_eval(
1203 1202 expr,
1204 1203 EvaluationContext(
1205 1204 globals=self.global_namespace,
1206 1205 locals=self.namespace,
1207 1206 evaluation=self.evaluation,
1208 1207 ),
1209 1208 )
1210 1209 done = True
1211 1210 except Exception as e:
1212 1211 if self.debug:
1213 1212 print("Evaluation exception", e)
1214 1213 # trim the expression to remove any invalid prefix
1215 1214 # e.g. user starts `(d[`, so we get `expr = '(d'`,
1216 1215 # where parenthesis is not closed.
1217 1216 # TODO: make this faster by reusing parts of the computation?
1218 1217 expr = expr[1:]
1219 1218 return obj
1220 1219
1221 1220 def get__all__entries(obj):
1222 1221 """returns the strings in the __all__ attribute"""
1223 1222 try:
1224 1223 words = getattr(obj, '__all__')
1225 1224 except:
1226 1225 return []
1227 1226
1228 1227 return [w for w in words if isinstance(w, str)]
1229 1228
1230 1229
1231 1230 class _DictKeyState(enum.Flag):
1232 1231 """Represent state of the key match in context of other possible matches.
1233 1232
1234 1233 - given `d1 = {'a': 1}` completion on `d1['<tab>` will yield `{'a': END_OF_ITEM}` as there is no tuple.
1235 1234 - given `d2 = {('a', 'b'): 1}`: `d2['a', '<tab>` will yield `{'b': END_OF_TUPLE}` as there are no tuple members to add beyond `'b'`.
1236 1235 - given `d3 = {('a', 'b'): 1}`: `d3['<tab>` will yield `{'a': IN_TUPLE}` as `'a'` can be added.
1237 1236 - given `d4 = {'a': 1, ('a', 'b'): 2}`: `d4['<tab>` will yield `{'a': END_OF_ITEM & END_OF_TUPLE}`
1238 1237 """
1239 1238
1240 1239 BASELINE = 0
1241 1240 END_OF_ITEM = enum.auto()
1242 1241 END_OF_TUPLE = enum.auto()
1243 1242 IN_TUPLE = enum.auto()
1244 1243
1245 1244
1246 1245 def _parse_tokens(c):
1247 1246 """Parse tokens even if there is an error."""
1248 1247 tokens = []
1249 1248 token_generator = tokenize.generate_tokens(iter(c.splitlines()).__next__)
1250 1249 while True:
1251 1250 try:
1252 1251 tokens.append(next(token_generator))
1253 1252 except tokenize.TokenError:
1254 1253 return tokens
1255 1254 except StopIteration:
1256 1255 return tokens
1257 1256
1258 1257
1259 1258 def _match_number_in_dict_key_prefix(prefix: str) -> Union[str, None]:
1260 1259 """Match any valid Python numeric literal in a prefix of dictionary keys.
1261 1260
1262 1261 References:
1263 1262 - https://docs.python.org/3/reference/lexical_analysis.html#numeric-literals
1264 1263 - https://docs.python.org/3/library/tokenize.html
1265 1264 """
1266 1265 if prefix[-1].isspace():
1267 1266 # if user typed a space we do not have anything to complete
1268 1267 # even if there was a valid number token before
1269 1268 return None
1270 1269 tokens = _parse_tokens(prefix)
1271 1270 rev_tokens = reversed(tokens)
1272 1271 skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
1273 1272 number = None
1274 1273 for token in rev_tokens:
1275 1274 if token.type in skip_over:
1276 1275 continue
1277 1276 if number is None:
1278 1277 if token.type == tokenize.NUMBER:
1279 1278 number = token.string
1280 1279 continue
1281 1280 else:
1282 1281 # we did not match a number
1283 1282 return None
1284 1283 if token.type == tokenize.OP:
1285 1284 if token.string == ",":
1286 1285 break
1287 1286 if token.string in {"+", "-"}:
1288 1287 number = token.string + number
1289 1288 else:
1290 1289 return None
1291 1290 return number
1292 1291
1293 1292
1294 1293 _INT_FORMATS = {
1295 1294 "0b": bin,
1296 1295 "0o": oct,
1297 1296 "0x": hex,
1298 1297 }
1299 1298
1300 1299
1301 1300 def match_dict_keys(
1302 1301 keys: List[Union[str, bytes, Tuple[Union[str, bytes], ...]]],
1303 1302 prefix: str,
1304 1303 delims: str,
1305 1304 extra_prefix: Optional[Tuple[Union[str, bytes], ...]] = None,
1306 1305 ) -> Tuple[str, int, Dict[str, _DictKeyState]]:
1307 1306 """Used by dict_key_matches, matching the prefix to a list of keys
1308 1307
1309 1308 Parameters
1310 1309 ----------
1311 1310 keys
1312 1311 list of keys in dictionary currently being completed.
1313 1312 prefix
1314 1313 Part of the text already typed by the user. E.g. `mydict[b'fo`
1315 1314 delims
1316 1315 String of delimiters to consider when finding the current key.
1317 1316 extra_prefix : optional
1318 1317 Part of the text already typed in multi-key index cases. E.g. for
1319 1318 `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.
1320 1319
1321 1320 Returns
1322 1321 -------
1323 1322 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
1324 1323 ``quote`` being the quote that needs to be used to close the current string,
1325 1324 ``token_start`` the position where the replacement should start occurring, and
1326 1325 ``matched`` a dictionary mapping replacement/completion keys to values
1327 1326 indicating their completion state.
1328 1327 """
1329 1328 prefix_tuple = extra_prefix if extra_prefix else ()
1330 1329
1331 1330 prefix_tuple_size = sum(
1332 1331 [
1333 1332 # for pandas, do not count slices as taking space
1334 1333 not isinstance(k, slice)
1335 1334 for k in prefix_tuple
1336 1335 ]
1337 1336 )
1338 1337 text_serializable_types = (str, bytes, int, float, slice)
1339 1338
1340 1339 def filter_prefix_tuple(key):
1341 1340 # Reject too short keys
1342 1341 if len(key) <= prefix_tuple_size:
1343 1342 return False
1344 1343 # Reject keys which cannot be serialised to text
1345 1344 for k in key:
1346 1345 if not isinstance(k, text_serializable_types):
1347 1346 return False
1348 1347 # Reject keys that do not match the prefix
1349 1348 for k, pt in zip(key, prefix_tuple):
1350 1349 if k != pt and not isinstance(pt, slice):
1351 1350 return False
1352 1351 # All checks passed!
1353 1352 return True
1354 1353
1355 1354 filtered_key_is_final: Dict[Union[str, bytes, int, float], _DictKeyState] = (
1356 1355 defaultdict(lambda: _DictKeyState.BASELINE)
1357 1356 )
1358 1357
1359 1358 for k in keys:
1360 1359 # If at least one of the matches is not final, mark as undetermined.
1361 1360 # This can happen with `d = {111: 'b', (111, 222): 'a'}` where
1362 1361 # `111` appears final on first match but is not final on the second.
1363 1362
1364 1363 if isinstance(k, tuple):
1365 1364 if filter_prefix_tuple(k):
1366 1365 key_fragment = k[prefix_tuple_size]
1367 1366 filtered_key_is_final[key_fragment] |= (
1368 1367 _DictKeyState.END_OF_TUPLE
1369 1368 if len(k) == prefix_tuple_size + 1
1370 1369 else _DictKeyState.IN_TUPLE
1371 1370 )
1372 1371 elif prefix_tuple_size > 0:
1373 1372 # we are completing a tuple but this key is not a tuple,
1374 1373 # so we should ignore it
1375 1374 pass
1376 1375 else:
1377 1376 if isinstance(k, text_serializable_types):
1378 1377 filtered_key_is_final[k] |= _DictKeyState.END_OF_ITEM
1379 1378
1380 1379 filtered_keys = filtered_key_is_final.keys()
1381 1380
1382 1381 if not prefix:
1383 1382 return "", 0, {repr(k): v for k, v in filtered_key_is_final.items()}
1384 1383
1385 1384 quote_match = re.search("(?:\"|')", prefix)
1386 1385 is_user_prefix_numeric = False
1387 1386
1388 1387 if quote_match:
1389 1388 quote = quote_match.group()
1390 1389 valid_prefix = prefix + quote
1391 1390 try:
1392 1391 prefix_str = literal_eval(valid_prefix)
1393 1392 except Exception:
1394 1393 return "", 0, {}
1395 1394 else:
1396 1395 # If it does not look like a string, let's assume
1397 1396 # we are dealing with a number or variable.
1398 1397 number_match = _match_number_in_dict_key_prefix(prefix)
1399 1398
1400 1399 # We do not want the key matcher to suggest variable names so we yield:
1401 1400 if number_match is None:
1402 1401 # The alternative would be to assume that the user forgot the quote
1403 1402 # and if the substring matches, suggest adding it at the start.
1404 1403 return "", 0, {}
1405 1404
1406 1405 prefix_str = number_match
1407 1406 is_user_prefix_numeric = True
1408 1407 quote = ""
1409 1408
1410 1409 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
1411 1410 token_match = re.search(pattern, prefix, re.UNICODE)
1412 1411 assert token_match is not None # silence mypy
1413 1412 token_start = token_match.start()
1414 1413 token_prefix = token_match.group()
1415 1414
1416 1415 matched: Dict[str, _DictKeyState] = {}
1417 1416
1418 1417 str_key: Union[str, bytes]
1419 1418
1420 1419 for key in filtered_keys:
1421 1420 if isinstance(key, (int, float)):
1422 1421             # User typed a string but this key is a number; skip it.
1423 1422 if not is_user_prefix_numeric:
1424 1423 continue
1425 1424 str_key = str(key)
1426 1425 if isinstance(key, int):
1427 1426 int_base = prefix_str[:2].lower()
1428 1427                 # if the user typed an integer using binary/oct/hex notation:
1429 1428 if int_base in _INT_FORMATS:
1430 1429 int_format = _INT_FORMATS[int_base]
1431 1430 str_key = int_format(key)
1432 1431 else:
1433 1432             # User typed a number but this key is not a number; skip it.
1434 1433 if is_user_prefix_numeric:
1435 1434 continue
1436 1435 str_key = key
1437 1436 try:
1438 1437 if not str_key.startswith(prefix_str):
1439 1438 continue
1440 1439 except (AttributeError, TypeError, UnicodeError) as e:
1441 1440 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
1442 1441 continue
1443 1442
1444 1443 # reformat remainder of key to begin with prefix
1445 1444 rem = str_key[len(prefix_str) :]
1446 1445 # force repr wrapped in '
1447 1446 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
1448 1447 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
1449 1448 if quote == '"':
1450 1449 # The entered prefix is quoted with ",
1451 1450 # but the match is quoted with '.
1452 1451 # A contained " hence needs escaping for comparison:
1453 1452 rem_repr = rem_repr.replace('"', '\\"')
1454 1453
1455 1454 # then reinsert prefix from start of token
1456 1455 match = "%s%s" % (token_prefix, rem_repr)
1457 1456
1458 1457 matched[match] = filtered_key_is_final[key]
1459 1458 return quote, token_start, matched
1460 1459
1461 1460
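# A minimal sketch of how the matcher above can be driven; the helper name, sample
# keys, prefix, and delimiter string are hypothetical (the real caller passes
# ``self.splitter.delims``, see ``dict_key_matches`` further down).
def _match_dict_keys_example():
    keys = ["foo", "food", 123]
    quote, token_start, matched = match_dict_keys(keys, "'f", " \t\n'\"[]")
    # ``quote`` echoes the quote style the user opened with ("'" here),
    # ``token_start`` is the offset at which the completion text starts, and
    # ``matched`` maps completion texts ("foo", "food") to _DictKeyState flags;
    # the numeric key 123 is skipped because the user typed a quoted prefix.
    return quote, token_start, matched
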
1462 1461 def cursor_to_position(text:str, line:int, column:int)->int:
1463 1462 """
1464 1463 Convert the (line,column) position of the cursor in text to an offset in a
1465 1464 string.
1466 1465
1467 1466 Parameters
1468 1467 ----------
1469 1468 text : str
1470 1469 The text in which to calculate the cursor offset
1471 1470 line : int
1472 1471 Line of the cursor; 0-indexed
1473 1472 column : int
1474 1473 Column of the cursor 0-indexed
1475 1474
1476 1475 Returns
1477 1476 -------
1478 1477 Position of the cursor in ``text``, 0-indexed.
1479 1478
1480 1479 See Also
1481 1480 --------
1482 1481 position_to_cursor : reciprocal of this function
1483 1482
1484 1483 """
1485 1484 lines = text.split('\n')
1486 1485 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
1487 1486
1488 1487 return sum(len(l) + 1 for l in lines[:line]) + column
1489 1488
1490 1489 def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
1491 1490 """
1492 1491 Convert the position of the cursor in text (0 indexed) to a line
1493 1492 number(0-indexed) and a column number (0-indexed) pair
1494 1493
1495 1494 Position should be a valid position in ``text``.
1496 1495
1497 1496 Parameters
1498 1497 ----------
1499 1498 text : str
1500 1499 The text in which to calculate the cursor offset
1501 1500 offset : int
1502 1501 Position of the cursor in ``text``, 0-indexed.
1503 1502
1504 1503 Returns
1505 1504 -------
1506 1505 (line, column) : (int, int)
1507 1506 Line of the cursor; 0-indexed, column of the cursor 0-indexed
1508 1507
1509 1508 See Also
1510 1509 --------
1511 1510 cursor_to_position : reciprocal of this function
1512 1511
1513 1512 """
1514 1513
1515 1514 assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
1516 1515
1517 1516 before = text[:offset]
1518 1517     blines = before.split('\n')  # ! splitlines would trim a trailing \n, hence split('\n')
1519 1518 line = before.count('\n')
1520 1519 col = len(blines[-1])
1521 1520 return line, col
1522 1521
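# A minimal sketch of the round trip between the two helpers above; the helper
# name and the sample text are hypothetical.
def _cursor_round_trip_example():
    text = "ab\ncd"
    offset = cursor_to_position(text, 1, 1)  # -> 4, i.e. pointing at "d"
    assert offset == 4
    assert position_to_cursor(text, offset) == (1, 1)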
1523 1522
1524 1523 def _safe_isinstance(obj, module, class_name, *attrs):
1525 1524 """Checks if obj is an instance of module.class_name if loaded
1526 1525 """
1527 1526 if module in sys.modules:
1528 1527 m = sys.modules[module]
1529 1528 for attr in [class_name, *attrs]:
1530 1529 m = getattr(m, attr)
1531 1530 return isinstance(obj, m)
1532 1531
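# A minimal sketch of the lazy isinstance check above; the helper name is
# hypothetical. The point is that the check never triggers an import by itself.
def _safe_isinstance_example():
    assert _safe_isinstance({}, "builtins", "dict")
    # Falsy whether or not pandas is loaded: None if it is absent, False otherwise.
    assert not _safe_isinstance({}, "pandas", "DataFrame")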
1533 1532
1534 1533 @context_matcher()
1535 1534 def back_unicode_name_matcher(context: CompletionContext):
1536 1535 """Match Unicode characters back to Unicode name
1537 1536
1538 1537 Same as :any:`back_unicode_name_matches`, but adopted to new Matcher API.
1539 1538 """
1540 1539 fragment, matches = back_unicode_name_matches(context.text_until_cursor)
1541 1540 return _convert_matcher_v1_result_to_v2(
1542 1541 matches, type="unicode", fragment=fragment, suppress_if_matches=True
1543 1542 )
1544 1543
1545 1544
1546 1545 def back_unicode_name_matches(text: str) -> Tuple[str, Sequence[str]]:
1547 1546 """Match Unicode characters back to Unicode name
1548 1547
1549 1548 This does ``β˜ƒ`` -> ``\\snowman``
1550 1549
1551 1550     Note that snowman is not a valid Python 3 combining character but will still be expanded;
1552 1551     it will not be recombined back into the snowman character by the completion machinery.
1553 1552 
1554 1553     Standard escape sequences like \\n, \\b ... are not back-completed either.
1555 1554
1556 1555 .. deprecated:: 8.6
1557 1556 You can use :meth:`back_unicode_name_matcher` instead.
1558 1557
1559 1558 Returns
1560 1559 =======
1561 1560
1562 1561 Return a tuple with two elements:
1563 1562
1564 1563 - The Unicode character that was matched (preceded with a backslash), or
1565 1564 empty string,
1566 1565     - a sequence (of length 1) with the name of the matched Unicode character,
1567 1566       preceded by a backslash, or empty if no match.
1568 1567 """
1569 1568 if len(text)<2:
1570 1569 return '', ()
1571 1570 maybe_slash = text[-2]
1572 1571 if maybe_slash != '\\':
1573 1572 return '', ()
1574 1573
1575 1574 char = text[-1]
1576 1575 # no expand on quote for completion in strings.
1577 1576 # nor backcomplete standard ascii keys
1578 1577 if char in string.ascii_letters or char in ('"',"'"):
1579 1578 return '', ()
1580 1579 try :
1581 1580 unic = unicodedata.name(char)
1582 1581 return '\\'+char,('\\'+unic,)
1583 1582 except KeyError:
1584 1583 pass
1585 1584 return '', ()
1586 1585
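# A minimal sketch of the expected behaviour; the helper name and sample text are
# hypothetical (U+2603 is named SNOWMAN in the Unicode database).
def _back_unicode_example():
    fragment, names = back_unicode_name_matches("print\\β˜ƒ")
    assert fragment == "\\β˜ƒ"
    assert names == ("\\SNOWMAN",)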
1587 1586
1588 1587 @context_matcher()
1589 1588 def back_latex_name_matcher(context: CompletionContext):
1590 1589 """Match latex characters back to unicode name
1591 1590
1592 1591 Same as :any:`back_latex_name_matches`, but adopted to new Matcher API.
1593 1592 """
1594 1593 fragment, matches = back_latex_name_matches(context.text_until_cursor)
1595 1594 return _convert_matcher_v1_result_to_v2(
1596 1595 matches, type="latex", fragment=fragment, suppress_if_matches=True
1597 1596 )
1598 1597
1599 1598
1600 1599 def back_latex_name_matches(text: str) -> Tuple[str, Sequence[str]]:
1601 1600 """Match latex characters back to unicode name
1602 1601
1603 1602 This does ``\\β„΅`` -> ``\\aleph``
1604 1603
1605 1604 .. deprecated:: 8.6
1606 1605 You can use :meth:`back_latex_name_matcher` instead.
1607 1606 """
1608 1607 if len(text)<2:
1609 1608 return '', ()
1610 1609 maybe_slash = text[-2]
1611 1610 if maybe_slash != '\\':
1612 1611 return '', ()
1613 1612
1614 1613
1615 1614 char = text[-1]
1616 1615 # no expand on quote for completion in strings.
1617 1616 # nor backcomplete standard ascii keys
1618 1617 if char in string.ascii_letters or char in ('"',"'"):
1619 1618 return '', ()
1620 1619 try :
1621 1620 latex = reverse_latex_symbol[char]
1622 1621         # the returned fragment includes '\\' so that the backslash is replaced as well
1623 1622 return '\\'+char,[latex]
1624 1623 except KeyError:
1625 1624 pass
1626 1625 return '', ()
1627 1626
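# A minimal sketch mirroring the docstring example above; the helper name is
# hypothetical and assumes ``β„΅`` is present in ``reverse_latex_symbol``.
def _back_latex_example():
    fragment, names = back_latex_name_matches("x = \\β„΅")
    assert fragment == "\\β„΅"
    assert names == ["\\aleph"]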
1628 1627
1629 1628 def _formatparamchildren(parameter) -> str:
1630 1629 """
1631 1630 Get parameter name and value from Jedi Private API
1632 1631
1633 1632 Jedi does not expose a simple way to get `param=value` from its API.
1634 1633
1635 1634 Parameters
1636 1635 ----------
1637 1636 parameter
1638 1637 Jedi's function `Param`
1639 1638
1640 1639 Returns
1641 1640 -------
1642 1641 A string like 'a', 'b=1', '*args', '**kwargs'
1643 1642
1644 1643 """
1645 1644 description = parameter.description
1646 1645 if not description.startswith('param '):
1647 1646         raise ValueError('Jedi function parameter description has changed format. '
1648 1647                          'Expected "param ...", found %r.' % description)
1649 1648 return description[6:]
1650 1649
1651 1650 def _make_signature(completion)-> str:
1652 1651 """
1653 1652 Make the signature from a jedi completion
1654 1653
1655 1654 Parameters
1656 1655 ----------
1657 1656 completion : jedi.Completion
1658 1657 object does not complete a function type
1659 1658
1660 1659 Returns
1661 1660 -------
1662 1661     a string consisting of the function signature, with the parentheses but
1663 1662     without the function name. For example:
1664 1663 `(a, *args, b=1, **kwargs)`
1665 1664
1666 1665 """
1667 1666
1668 1667 # it looks like this might work on jedi 0.17
1669 1668 if hasattr(completion, 'get_signatures'):
1670 1669 signatures = completion.get_signatures()
1671 1670 if not signatures:
1672 1671 return '(?)'
1673 1672
1674 1673         c0 = signatures[0]
1675 1674 return '('+c0.to_string().split('(', maxsplit=1)[1]
1676 1675
1677 1676 return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
1678 1677 for p in signature.defined_names()) if f])
1679 1678
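# A minimal sketch of how the helper above is typically exercised, assuming a
# recent jedi is installed; the helper name and sample source are hypothetical,
# and the exact signature text depends on the jedi version.
def _make_signature_example():
    import jedi

    completions = jedi.Interpreter("len", [{}]).complete(line=1, column=3)
    # For the ``len`` builtin this returns something like "(obj, /)".
    return _make_signature(completions[0])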
1680 1679
1681 1680 _CompleteResult = Dict[str, MatcherResult]
1682 1681
1683 1682
1684 1683 DICT_MATCHER_REGEX = re.compile(
1685 1684 r"""(?x)
1686 1685 ( # match dict-referring - or any get item object - expression
1687 1686 .+
1688 1687 )
1689 1688 \[ # open bracket
1690 1689 \s* # and optional whitespace
1691 1690 # Capture any number of serializable objects (e.g. "a", "b", 'c')
1692 1691 # and slices
1693 1692 ((?:(?:
1694 1693 (?: # closed string
1695 1694 [uUbB]? # string prefix (r not handled)
1696 1695 (?:
1697 1696 '(?:[^']|(?<!\\)\\')*'
1698 1697 |
1699 1698 "(?:[^"]|(?<!\\)\\")*"
1700 1699 )
1701 1700 )
1702 1701 |
1703 1702 # capture integers and slices
1704 1703 (?:[-+]?\d+)?(?::(?:[-+]?\d+)?){0,2}
1705 1704 |
1706 1705 # integer in bin/hex/oct notation
1707 1706 0[bBxXoO]_?(?:\w|\d)+
1708 1707 )
1709 1708 \s*,\s*
1710 1709 )*)
1711 1710 ((?:
1712 1711 (?: # unclosed string
1713 1712 [uUbB]? # string prefix (r not handled)
1714 1713 (?:
1715 1714 '(?:[^']|(?<!\\)\\')*
1716 1715 |
1717 1716 "(?:[^"]|(?<!\\)\\")*
1718 1717 )
1719 1718 )
1720 1719 |
1721 1720 # unfinished integer
1722 1721 (?:[-+]?\d+)
1723 1722 |
1724 1723 # integer in bin/hex/oct notation
1725 1724 0[bBxXoO]_?(?:\w|\d)+
1726 1725 )
1727 1726 )?
1728 1727 $
1729 1728 """
1730 1729 )
1731 1730
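# A minimal sketch of what the regex above captures; the helper name and sample
# buffer are hypothetical.
def _dict_matcher_regex_example():
    match = DICT_MATCHER_REGEX.search("data['abc', 'de")
    assert match is not None
    expr, prior_tuple_keys, key_prefix = match.groups()
    assert expr == "data"                  # expression being subscripted
    assert prior_tuple_keys == "'abc', "   # already-closed tuple elements
    assert key_prefix == "'de"             # unclosed key being completed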
1732 1731
1733 1732 def _convert_matcher_v1_result_to_v2(
1734 1733 matches: Sequence[str],
1735 1734 type: str,
1736 1735 fragment: Optional[str] = None,
1737 1736 suppress_if_matches: bool = False,
1738 1737 ) -> SimpleMatcherResult:
1739 1738 """Utility to help with transition"""
1740 1739 result = {
1741 1740 "completions": [SimpleCompletion(text=match, type=type) for match in matches],
1742 1741 "suppress": (True if matches else False) if suppress_if_matches else False,
1743 1742 }
1744 1743 if fragment is not None:
1745 1744 result["matched_fragment"] = fragment
1746 1745 return cast(SimpleMatcherResult, result)
1747 1746
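# A minimal sketch of the v1 -> v2 conversion above; the helper name and sample
# matches are hypothetical.
def _convert_matcher_result_example():
    result = _convert_matcher_v1_result_to_v2(
        ["foo", "food"], type="variable", fragment="fo", suppress_if_matches=True
    )
    assert [c.text for c in result["completions"]] == ["foo", "food"]
    assert result["suppress"] is True
    assert result["matched_fragment"] == "fo"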
1748 1747
1749 1748 class IPCompleter(Completer):
1750 1749 """Extension of the completer class with IPython-specific features"""
1751 1750
1752 1751 @observe('greedy')
1753 1752 def _greedy_changed(self, change):
1754 1753 """update the splitter and readline delims when greedy is changed"""
1755 1754 if change["new"]:
1756 1755 self.evaluation = "unsafe"
1757 1756 self.auto_close_dict_keys = True
1758 1757 self.splitter.delims = GREEDY_DELIMS
1759 1758 else:
1760 1759 self.evaluation = "limited"
1761 1760 self.auto_close_dict_keys = False
1762 1761 self.splitter.delims = DELIMS
1763 1762
1764 1763 dict_keys_only = Bool(
1765 1764 False,
1766 1765 help="""
1767 1766 Whether to show dict key matches only.
1768 1767
1769 1768 (disables all matchers except for `IPCompleter.dict_key_matcher`).
1770 1769 """,
1771 1770 )
1772 1771
1773 1772 suppress_competing_matchers = UnionTrait(
1774 1773 [Bool(allow_none=True), DictTrait(Bool(None, allow_none=True))],
1775 1774 default_value=None,
1776 1775 help="""
1777 1776 Whether to suppress completions from other *Matchers*.
1778 1777
1779 1778 When set to ``None`` (default) the matchers will attempt to auto-detect
1780 1779 whether suppression of other matchers is desirable. For example, at
1781 1780 the beginning of a line followed by `%` we expect a magic completion
1782 1781 to be the only applicable option, and after ``my_dict['`` we usually
1783 1782 expect a completion with an existing dictionary key.
1784 1783
1785 1784 If you want to disable this heuristic and see completions from all matchers,
1786 1785 set ``IPCompleter.suppress_competing_matchers = False``.
1787 1786 To disable the heuristic for specific matchers provide a dictionary mapping:
1788 1787 ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher': False}``.
1789 1788
1790 1789 Set ``IPCompleter.suppress_competing_matchers = True`` to limit
1791 1790 completions to the set of matchers with the highest priority;
1792 1791         this is equivalent to ``IPCompleter.merge_completions = False`` and
1793 1792 can be beneficial for performance, but will sometimes omit relevant
1794 1793 candidates from matchers further down the priority list.
1795 1794 """,
1796 1795 ).tag(config=True)
1797 1796
1798 1797 merge_completions = Bool(
1799 1798 True,
1800 1799 help="""Whether to merge completion results into a single list
1801 1800
1802 1801 If False, only the completion results from the first non-empty
1803 1802 completer will be returned.
1804 1803
1805 1804 As of version 8.6.0, setting the value to ``False`` is an alias for:
1806 1805         ``IPCompleter.suppress_competing_matchers = True``.
1807 1806 """,
1808 1807 ).tag(config=True)
1809 1808
1810 1809 disable_matchers = ListTrait(
1811 1810 Unicode(),
1812 1811 help="""List of matchers to disable.
1813 1812
1814 1813 The list should contain matcher identifiers (see :any:`completion_matcher`).
1815 1814 """,
1816 1815 ).tag(config=True)
1817 1816
1818 1817 omit__names = Enum(
1819 1818 (0, 1, 2),
1820 1819 default_value=2,
1821 1820 help="""Instruct the completer to omit private method names
1822 1821
1823 1822 Specifically, when completing on ``object.<tab>``.
1824 1823
1825 1824 When 2 [default]: all names that start with '_' will be excluded.
1826 1825
1827 1826 When 1: all 'magic' names (``__foo__``) will be excluded.
1828 1827
1829 1828 When 0: nothing will be excluded.
1830 1829 """
1831 1830 ).tag(config=True)
1832 1831 limit_to__all__ = Bool(False,
1833 1832 help="""
1834 1833 DEPRECATED as of version 5.0.
1835 1834
1836 1835 Instruct the completer to use __all__ for the completion
1837 1836
1838 1837 Specifically, when completing on ``object.<tab>``.
1839 1838
1840 1839 When True: only those names in obj.__all__ will be included.
1841 1840
1842 1841 When False [default]: the __all__ attribute is ignored
1843 1842 """,
1844 1843 ).tag(config=True)
1845 1844
1846 1845 profile_completions = Bool(
1847 1846 default_value=False,
1848 1847 help="If True, emit profiling data for completion subsystem using cProfile."
1849 1848 ).tag(config=True)
1850 1849
1851 1850 profiler_output_dir = Unicode(
1852 1851 default_value=".completion_profiles",
1853 1852 help="Template for path at which to output profile data for completions."
1854 1853 ).tag(config=True)
1855 1854
1856 1855 @observe('limit_to__all__')
1857 1856 def _limit_to_all_changed(self, change):
1858 1857 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
1859 1858 'value has been deprecated since IPython 5.0, will be made to have '
1860 1859             'no effect and then removed in a future version of IPython.',
1861 1860 UserWarning)
1862 1861
1863 1862 def __init__(
1864 1863 self, shell=None, namespace=None, global_namespace=None, config=None, **kwargs
1865 1864 ):
1866 1865 """IPCompleter() -> completer
1867 1866
1868 1867 Return a completer object.
1869 1868
1870 1869 Parameters
1871 1870 ----------
1872 1871 shell
1873 1872 a pointer to the ipython shell itself. This is needed
1874 1873 because this completer knows about magic functions, and those can
1875 1874 only be accessed via the ipython instance.
1876 1875 namespace : dict, optional
1877 1876 an optional dict where completions are performed.
1878 1877 global_namespace : dict, optional
1879 1878 secondary optional dict for completions, to
1880 1879 handle cases (such as IPython embedded inside functions) where
1881 1880 both Python scopes are visible.
1882 1881 config : Config
1883 1882 traitlet's config object
1884 1883 **kwargs
1885 1884 passed to super class unmodified.
1886 1885 """
1887 1886
1888 1887 self.magic_escape = ESC_MAGIC
1889 1888 self.splitter = CompletionSplitter()
1890 1889
1891 1890 # _greedy_changed() depends on splitter and readline being defined:
1892 1891 super().__init__(
1893 1892 namespace=namespace,
1894 1893 global_namespace=global_namespace,
1895 1894 config=config,
1896 1895 **kwargs,
1897 1896 )
1898 1897
1899 1898 # List where completion matches will be stored
1900 1899 self.matches = []
1901 1900 self.shell = shell
1902 1901 # Regexp to split filenames with spaces in them
1903 1902 self.space_name_re = re.compile(r'([^\\] )')
1904 1903 # Hold a local ref. to glob.glob for speed
1905 1904 self.glob = glob.glob
1906 1905
1907 1906 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1908 1907 # buffers, to avoid completion problems.
1909 1908 term = os.environ.get('TERM','xterm')
1910 1909 self.dumb_terminal = term in ['dumb','emacs']
1911 1910
1912 1911 # Special handling of backslashes needed in win32 platforms
1913 1912 if sys.platform == "win32":
1914 1913 self.clean_glob = self._clean_glob_win32
1915 1914 else:
1916 1915 self.clean_glob = self._clean_glob
1917 1916
1918 1917 #regexp to parse docstring for function signature
1919 1918 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1920 1919 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1921 1920 #use this if positional argument name is also needed
1922 1921 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1923 1922
1924 1923 self.magic_arg_matchers = [
1925 1924 self.magic_config_matcher,
1926 1925 self.magic_color_matcher,
1927 1926 ]
1928 1927
1929 1928 # This is set externally by InteractiveShell
1930 1929 self.custom_completers = None
1931 1930
1932 1931 # This is a list of names of unicode characters that can be completed
1933 1932 # into their corresponding unicode value. The list is large, so we
1934 1933 # lazily initialize it on first use. Consuming code should access this
1935 1934 # attribute through the `@unicode_names` property.
1936 1935 self._unicode_names = None
1937 1936
1938 1937 self._backslash_combining_matchers = [
1939 1938 self.latex_name_matcher,
1940 1939 self.unicode_name_matcher,
1941 1940 back_latex_name_matcher,
1942 1941 back_unicode_name_matcher,
1943 1942 self.fwd_unicode_matcher,
1944 1943 ]
1945 1944
1946 1945 if not self.backslash_combining_completions:
1947 1946 for matcher in self._backslash_combining_matchers:
1948 1947 self.disable_matchers.append(_get_matcher_id(matcher))
1949 1948
1950 1949 if not self.merge_completions:
1951 1950 self.suppress_competing_matchers = True
1952 1951
1953 1952 @property
1954 1953 def matchers(self) -> List[Matcher]:
1955 1954 """All active matcher routines for completion"""
1956 1955 if self.dict_keys_only:
1957 1956 return [self.dict_key_matcher]
1958 1957
1959 1958 if self.use_jedi:
1960 1959 return [
1961 1960 *self.custom_matchers,
1962 1961 *self._backslash_combining_matchers,
1963 1962 *self.magic_arg_matchers,
1964 1963 self.custom_completer_matcher,
1965 1964 self.magic_matcher,
1966 1965 self._jedi_matcher,
1967 1966 self.dict_key_matcher,
1968 1967 self.file_matcher,
1969 1968 ]
1970 1969 else:
1971 1970 return [
1972 1971 *self.custom_matchers,
1973 1972 *self._backslash_combining_matchers,
1974 1973 *self.magic_arg_matchers,
1975 1974 self.custom_completer_matcher,
1976 1975 self.dict_key_matcher,
1977 1976 self.magic_matcher,
1978 1977 self.python_matcher,
1979 1978 self.file_matcher,
1980 1979 self.python_func_kw_matcher,
1981 1980 ]
1982 1981
1983 1982 def all_completions(self, text:str) -> List[str]:
1984 1983 """
1985 1984 Wrapper around the completion methods for the benefit of emacs.
1986 1985 """
1987 1986 prefix = text.rpartition('.')[0]
1988 1987 with provisionalcompleter():
1989 1988 return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
1990 1989 for c in self.completions(text, len(text))]
1991 1990
1992 1991 return self.complete(text)[1]
1993 1992
1994 1993 def _clean_glob(self, text:str):
1995 1994 return self.glob("%s*" % text)
1996 1995
1997 1996 def _clean_glob_win32(self, text:str):
1998 1997 return [f.replace("\\","/")
1999 1998 for f in self.glob("%s*" % text)]
2000 1999
2001 2000 @context_matcher()
2002 2001 def file_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2003 2002 """Same as :any:`file_matches`, but adopted to new Matcher API."""
2004 2003 matches = self.file_matches(context.token)
2005 2004 # TODO: add a heuristic for suppressing (e.g. if it has OS-specific delimiter,
2006 2005 # starts with `/home/`, `C:\`, etc)
2007 2006 return _convert_matcher_v1_result_to_v2(matches, type="path")
2008 2007
2009 2008 def file_matches(self, text: str) -> List[str]:
2010 2009 """Match filenames, expanding ~USER type strings.
2011 2010
2012 2011 Most of the seemingly convoluted logic in this completer is an
2013 2012 attempt to handle filenames with spaces in them. And yet it's not
2014 2013 quite perfect, because Python's readline doesn't expose all of the
2015 2014 GNU readline details needed for this to be done correctly.
2016 2015
2017 2016 For a filename with a space in it, the printed completions will be
2018 2017 only the parts after what's already been typed (instead of the
2019 2018 full completions, as is normally done). I don't think with the
2020 2019 current (as of Python 2.3) Python readline it's possible to do
2021 2020 better.
2022 2021
2023 2022 .. deprecated:: 8.6
2024 2023 You can use :meth:`file_matcher` instead.
2025 2024 """
2026 2025
2027 2026 # chars that require escaping with backslash - i.e. chars
2028 2027 # that readline treats incorrectly as delimiters, but we
2029 2028 # don't want to treat as delimiters in filename matching
2030 2029 # when escaped with backslash
2031 2030 if text.startswith('!'):
2032 2031 text = text[1:]
2033 2032 text_prefix = u'!'
2034 2033 else:
2035 2034 text_prefix = u''
2036 2035
2037 2036 text_until_cursor = self.text_until_cursor
2038 2037 # track strings with open quotes
2039 2038 open_quotes = has_open_quotes(text_until_cursor)
2040 2039
2041 2040 if '(' in text_until_cursor or '[' in text_until_cursor:
2042 2041 lsplit = text
2043 2042 else:
2044 2043 try:
2045 2044 # arg_split ~ shlex.split, but with unicode bugs fixed by us
2046 2045 lsplit = arg_split(text_until_cursor)[-1]
2047 2046 except ValueError:
2048 2047 # typically an unmatched ", or backslash without escaped char.
2049 2048 if open_quotes:
2050 2049 lsplit = text_until_cursor.split(open_quotes)[-1]
2051 2050 else:
2052 2051 return []
2053 2052 except IndexError:
2054 2053 # tab pressed on empty line
2055 2054 lsplit = ""
2056 2055
2057 2056 if not open_quotes and lsplit != protect_filename(lsplit):
2058 2057 # if protectables are found, do matching on the whole escaped name
2059 2058 has_protectables = True
2060 2059 text0,text = text,lsplit
2061 2060 else:
2062 2061 has_protectables = False
2063 2062 text = os.path.expanduser(text)
2064 2063
2065 2064 if text == "":
2066 2065 return [text_prefix + protect_filename(f) for f in self.glob("*")]
2067 2066
2068 2067 # Compute the matches from the filesystem
2069 2068 if sys.platform == 'win32':
2070 2069 m0 = self.clean_glob(text)
2071 2070 else:
2072 2071 m0 = self.clean_glob(text.replace('\\', ''))
2073 2072
2074 2073 if has_protectables:
2075 2074 # If we had protectables, we need to revert our changes to the
2076 2075 # beginning of filename so that we don't double-write the part
2077 2076 # of the filename we have so far
2078 2077 len_lsplit = len(lsplit)
2079 2078 matches = [text_prefix + text0 +
2080 2079 protect_filename(f[len_lsplit:]) for f in m0]
2081 2080 else:
2082 2081 if open_quotes:
2083 2082 # if we have a string with an open quote, we don't need to
2084 2083 # protect the names beyond the quote (and we _shouldn't_, as
2085 2084 # it would cause bugs when the filesystem call is made).
2086 2085 matches = m0 if sys.platform == "win32" else\
2087 2086 [protect_filename(f, open_quotes) for f in m0]
2088 2087 else:
2089 2088 matches = [text_prefix +
2090 2089 protect_filename(f) for f in m0]
2091 2090
2092 2091 # Mark directories in input list by appending '/' to their names.
2093 2092 return [x+'/' if os.path.isdir(x) else x for x in matches]
2094 2093
2095 2094 @context_matcher()
2096 2095 def magic_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2097 2096 """Match magics."""
2098 2097 text = context.token
2099 2098 matches = self.magic_matches(text)
2100 2099 result = _convert_matcher_v1_result_to_v2(matches, type="magic")
2101 2100 is_magic_prefix = len(text) > 0 and text[0] == "%"
2102 2101 result["suppress"] = is_magic_prefix and bool(result["completions"])
2103 2102 return result
2104 2103
2105 2104 def magic_matches(self, text: str):
2106 2105 """Match magics.
2107 2106
2108 2107 .. deprecated:: 8.6
2109 2108 You can use :meth:`magic_matcher` instead.
2110 2109 """
2111 2110 # Get all shell magics now rather than statically, so magics loaded at
2112 2111 # runtime show up too.
2113 2112 lsm = self.shell.magics_manager.lsmagic()
2114 2113 line_magics = lsm['line']
2115 2114 cell_magics = lsm['cell']
2116 2115 pre = self.magic_escape
2117 2116 pre2 = pre+pre
2118 2117
2119 2118 explicit_magic = text.startswith(pre)
2120 2119
2121 2120 # Completion logic:
2122 2121 # - user gives %%: only do cell magics
2123 2122 # - user gives %: do both line and cell magics
2124 2123 # - no prefix: do both
2125 2124 # In other words, line magics are skipped if the user gives %% explicitly
2126 2125 #
2127 2126 # We also exclude magics that match any currently visible names:
2128 2127 # https://github.com/ipython/ipython/issues/4877, unless the user has
2129 2128 # typed a %:
2130 2129 # https://github.com/ipython/ipython/issues/10754
2131 2130 bare_text = text.lstrip(pre)
2132 2131 global_matches = self.global_matches(bare_text)
2133 2132 if not explicit_magic:
2134 2133 def matches(magic):
2135 2134 """
2136 2135 Filter magics, in particular remove magics that match
2137 2136 a name present in global namespace.
2138 2137 """
2139 2138 return ( magic.startswith(bare_text) and
2140 2139 magic not in global_matches )
2141 2140 else:
2142 2141 def matches(magic):
2143 2142 return magic.startswith(bare_text)
2144 2143
2145 2144 comp = [ pre2+m for m in cell_magics if matches(m)]
2146 2145 if not text.startswith(pre2):
2147 2146 comp += [ pre+m for m in line_magics if matches(m)]
2148 2147
2149 2148 return comp
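
    # A minimal sketch (hedged): ``completer`` stands for a hypothetical
    # IPCompleter bound to a running shell; the exact magics and their order
    # depend on what is loaded.
    #
    #     >>> completer.magic_matches('%ti')
    #     ['%%time', '%%timeit', '%time', '%timeit', ...]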
2150 2149
2151 2150 @context_matcher()
2152 2151 def magic_config_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2153 2152 """Match class names and attributes for %config magic."""
2154 2153 # NOTE: uses `line_buffer` equivalent for compatibility
2155 2154 matches = self.magic_config_matches(context.line_with_cursor)
2156 2155 return _convert_matcher_v1_result_to_v2(matches, type="param")
2157 2156
2158 2157 def magic_config_matches(self, text: str) -> List[str]:
2159 2158 """Match class names and attributes for %config magic.
2160 2159
2161 2160 .. deprecated:: 8.6
2162 2161 You can use :meth:`magic_config_matcher` instead.
2163 2162 """
2164 2163 texts = text.strip().split()
2165 2164
2166 2165 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
2167 2166 # get all configuration classes
2168 2167 classes = sorted(set([ c for c in self.shell.configurables
2169 2168 if c.__class__.class_traits(config=True)
2170 2169 ]), key=lambda x: x.__class__.__name__)
2171 2170 classnames = [ c.__class__.__name__ for c in classes ]
2172 2171
2173 2172 # return all classnames if config or %config is given
2174 2173 if len(texts) == 1:
2175 2174 return classnames
2176 2175
2177 2176 # match classname
2178 2177 classname_texts = texts[1].split('.')
2179 2178 classname = classname_texts[0]
2180 2179 classname_matches = [ c for c in classnames
2181 2180 if c.startswith(classname) ]
2182 2181
2183 2182 # return matched classes or the matched class with attributes
2184 2183 if texts[1].find('.') < 0:
2185 2184 return classname_matches
2186 2185 elif len(classname_matches) == 1 and \
2187 2186 classname_matches[0] == classname:
2188 2187 cls = classes[classnames.index(classname)].__class__
2189 2188 help = cls.class_get_help()
2190 2189 # strip leading '--' from cl-args:
2191 2190 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
2192 2191 return [ attr.split('=')[0]
2193 2192 for attr in help.strip().splitlines()
2194 2193 if attr.startswith(texts[1]) ]
2195 2194 return []
2196 2195
2197 2196 @context_matcher()
2198 2197 def magic_color_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2199 2198 """Match color schemes for %colors magic."""
2200 2199 # NOTE: uses `line_buffer` equivalent for compatibility
2201 2200 matches = self.magic_color_matches(context.line_with_cursor)
2202 2201 return _convert_matcher_v1_result_to_v2(matches, type="param")
2203 2202
2204 2203 def magic_color_matches(self, text: str) -> List[str]:
2205 2204 """Match color schemes for %colors magic.
2206 2205
2207 2206 .. deprecated:: 8.6
2208 2207 You can use :meth:`magic_color_matcher` instead.
2209 2208 """
2210 2209 texts = text.split()
2211 2210 if text.endswith(' '):
2212 2211 # .split() strips off the trailing whitespace. Add '' back
2213 2212 # so that: '%colors ' -> ['%colors', '']
2214 2213 texts.append('')
2215 2214
2216 2215 if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
2217 2216 prefix = texts[1]
2218 2217 return [ color for color in InspectColors.keys()
2219 2218 if color.startswith(prefix) ]
2220 2219 return []
2221 2220
2222 2221 @context_matcher(identifier="IPCompleter.jedi_matcher")
2223 2222 def _jedi_matcher(self, context: CompletionContext) -> _JediMatcherResult:
2224 2223 matches = self._jedi_matches(
2225 2224 cursor_column=context.cursor_position,
2226 2225 cursor_line=context.cursor_line,
2227 2226 text=context.full_text,
2228 2227 )
2229 2228 return {
2230 2229 "completions": matches,
2231 2230 # static analysis should not suppress other matchers
2232 2231 "suppress": False,
2233 2232 }
2234 2233
2235 2234 def _jedi_matches(
2236 2235 self, cursor_column: int, cursor_line: int, text: str
2237 2236 ) -> Iterator[_JediCompletionLike]:
2238 2237 """
2239 2238 Return a list of :any:`jedi.api.Completion`\\s object from a ``text`` and
2240 2239 cursor position.
2241 2240
2242 2241 Parameters
2243 2242 ----------
2244 2243 cursor_column : int
2245 2244 column position of the cursor in ``text``, 0-indexed.
2246 2245 cursor_line : int
2247 2246 line position of the cursor in ``text``, 0-indexed
2248 2247 text : str
2249 2248 text to complete
2250 2249
2251 2250 Notes
2252 2251 -----
2253 2252 If ``IPCompleter.debug`` is ``True`` may return a :any:`_FakeJediCompletion`
2254 2253 object containing a string with the Jedi debug information attached.
2255 2254
2256 2255 .. deprecated:: 8.6
2257 2256 You can use :meth:`_jedi_matcher` instead.
2258 2257 """
2259 2258 namespaces = [self.namespace]
2260 2259 if self.global_namespace is not None:
2261 2260 namespaces.append(self.global_namespace)
2262 2261
2263 2262 completion_filter = lambda x:x
2264 2263 offset = cursor_to_position(text, cursor_line, cursor_column)
2265 2264 # filter output if we are completing for object members
2266 2265 if offset:
2267 2266 pre = text[offset-1]
2268 2267 if pre == '.':
2269 2268 if self.omit__names == 2:
2270 2269 completion_filter = lambda c:not c.name.startswith('_')
2271 2270 elif self.omit__names == 1:
2272 2271 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
2273 2272 elif self.omit__names == 0:
2274 2273 completion_filter = lambda x:x
2275 2274 else:
2276 2275 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
2277 2276
2278 2277 interpreter = jedi.Interpreter(text[:offset], namespaces)
2279 2278 try_jedi = True
2280 2279
2281 2280 try:
2282 2281 # find the first token in the current tree -- if it is a ' or " then we are in a string
2283 2282 completing_string = False
2284 2283 try:
2285 2284 first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
2286 2285 except StopIteration:
2287 2286 pass
2288 2287 else:
2289 2288 # note the value may be ', ", or it may also be ''' or """, or
2290 2289 # in some cases, """what/you/typed..., but all of these are
2291 2290 # strings.
2292 2291 completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
2293 2292
2294 2293 # if we are in a string jedi is likely not the right candidate for
2295 2294 # now. Skip it.
2296 2295 try_jedi = not completing_string
2297 2296 except Exception as e:
2298 2297             # many things can go wrong; we are using a private API, just don't crash.
2299 2298 if self.debug:
2300 2299 print("Error detecting if completing a non-finished string :", e, '|')
2301 2300
2302 2301 if not try_jedi:
2303 2302 return iter([])
2304 2303 try:
2305 2304 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
2306 2305 except Exception as e:
2307 2306 if self.debug:
2308 2307 return iter(
2309 2308 [
2310 2309 _FakeJediCompletion(
2311 2310                             'Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""'
2312 2311 % (e)
2313 2312 )
2314 2313 ]
2315 2314 )
2316 2315 else:
2317 2316 return iter([])
2318 2317
2319 2318 @context_matcher()
2320 2319 def python_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2321 2320 """Match attributes or global python names"""
2322 2321 text = context.line_with_cursor
2323 2322 if "." in text:
2324 2323 try:
2325 2324 matches, fragment = self._attr_matches(text, include_prefix=False)
2326 2325 if text.endswith(".") and self.omit__names:
2327 2326 if self.omit__names == 1:
2328 2327 # true if txt is _not_ a __ name, false otherwise:
2329 2328 no__name = lambda txt: re.match(r".*\.__.*?__", txt) is None
2330 2329 else:
2331 2330 # true if txt is _not_ a _ name, false otherwise:
2332 2331 no__name = (
2333 2332 lambda txt: re.match(r"\._.*?", txt[txt.rindex(".") :])
2334 2333 is None
2335 2334 )
2336 2335 matches = filter(no__name, matches)
2337 2336 return _convert_matcher_v1_result_to_v2(
2338 2337 matches, type="attribute", fragment=fragment
2339 2338 )
2340 2339 except NameError:
2341 2340 # catches <undefined attributes>.<tab>
2342 2341 matches = []
2343 2342 return _convert_matcher_v1_result_to_v2(matches, type="attribute")
2344 2343 else:
2345 2344 matches = self.global_matches(context.token)
2346 2345 # TODO: maybe distinguish between functions, modules and just "variables"
2347 2346 return _convert_matcher_v1_result_to_v2(matches, type="variable")
2348 2347
2349 2348 @completion_matcher(api_version=1)
2350 2349 def python_matches(self, text: str) -> Iterable[str]:
2351 2350 """Match attributes or global python names.
2352 2351
2353 2352 .. deprecated:: 8.27
2354 2353 You can use :meth:`python_matcher` instead."""
2355 2354 if "." in text:
2356 2355 try:
2357 2356 matches = self.attr_matches(text)
2358 2357 if text.endswith('.') and self.omit__names:
2359 2358 if self.omit__names == 1:
2360 2359 # true if txt is _not_ a __ name, false otherwise:
2361 2360 no__name = (lambda txt:
2362 2361 re.match(r'.*\.__.*?__',txt) is None)
2363 2362 else:
2364 2363 # true if txt is _not_ a _ name, false otherwise:
2365 2364 no__name = (lambda txt:
2366 2365 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
2367 2366 matches = filter(no__name, matches)
2368 2367 except NameError:
2369 2368 # catches <undefined attributes>.<tab>
2370 2369 matches = []
2371 2370 else:
2372 2371 matches = self.global_matches(text)
2373 2372 return matches
2374 2373
2375 2374 def _default_arguments_from_docstring(self, doc):
2376 2375 """Parse the first line of docstring for call signature.
2377 2376
2378 2377 Docstring should be of the form 'min(iterable[, key=func])\n'.
2379 2378 It can also parse cython docstring of the form
2380 2379 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
2381 2380 """
2382 2381 if doc is None:
2383 2382 return []
2384 2383
2385 2384         # care only about the first line
2386 2385 line = doc.lstrip().splitlines()[0]
2387 2386
2388 2387 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
2389 2388 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
2390 2389 sig = self.docstring_sig_re.search(line)
2391 2390 if sig is None:
2392 2391 return []
2393 2392         # 'iterable[, key=func]' -> ['iterable[', ' key=func]']
2394 2393 sig = sig.groups()[0].split(',')
2395 2394 ret = []
2396 2395 for s in sig:
2397 2396 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
2398 2397 ret += self.docstring_kwd_re.findall(s)
2399 2398 return ret
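
    # A minimal sketch (hedged): ``completer`` stands for a hypothetical
    # IPCompleter instance; the sample docstring mirrors the one quoted above.
    #
    #     >>> completer._default_arguments_from_docstring('min(iterable[, key=func])\n')
    #     ['key']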
2400 2399
2401 2400 def _default_arguments(self, obj):
2402 2401 """Return the list of default arguments of obj if it is callable,
2403 2402 or empty list otherwise."""
2404 2403 call_obj = obj
2405 2404 ret = []
2406 2405 if inspect.isbuiltin(obj):
2407 2406 pass
2408 2407 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
2409 2408 if inspect.isclass(obj):
2410 2409 #for cython embedsignature=True the constructor docstring
2411 2410 #belongs to the object itself not __init__
2412 2411 ret += self._default_arguments_from_docstring(
2413 2412 getattr(obj, '__doc__', ''))
2414 2413 # for classes, check for __init__,__new__
2415 2414 call_obj = (getattr(obj, '__init__', None) or
2416 2415 getattr(obj, '__new__', None))
2417 2416 # for all others, check if they are __call__able
2418 2417 elif hasattr(obj, '__call__'):
2419 2418 call_obj = obj.__call__
2420 2419 ret += self._default_arguments_from_docstring(
2421 2420 getattr(call_obj, '__doc__', ''))
2422 2421
2423 2422 _keeps = (inspect.Parameter.KEYWORD_ONLY,
2424 2423 inspect.Parameter.POSITIONAL_OR_KEYWORD)
2425 2424
2426 2425 try:
2427 2426 sig = inspect.signature(obj)
2428 2427 ret.extend(k for k, v in sig.parameters.items() if
2429 2428 v.kind in _keeps)
2430 2429 except ValueError:
2431 2430 pass
2432 2431
2433 2432 return list(set(ret))
2434 2433
2435 2434 @context_matcher()
2436 2435 def python_func_kw_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2437 2436 """Match named parameters (kwargs) of the last open function."""
2438 2437 matches = self.python_func_kw_matches(context.token)
2439 2438 return _convert_matcher_v1_result_to_v2(matches, type="param")
2440 2439
2441 2440 def python_func_kw_matches(self, text):
2442 2441 """Match named parameters (kwargs) of the last open function.
2443 2442
2444 2443 .. deprecated:: 8.6
2445 2444 You can use :meth:`python_func_kw_matcher` instead.
2446 2445 """
2447 2446
2448 2447 if "." in text: # a parameter cannot be dotted
2449 2448 return []
2450 2449 try: regexp = self.__funcParamsRegex
2451 2450 except AttributeError:
2452 2451 regexp = self.__funcParamsRegex = re.compile(r'''
2453 2452 '.*?(?<!\\)' | # single quoted strings or
2454 2453 ".*?(?<!\\)" | # double quoted strings or
2455 2454 \w+ | # identifier
2456 2455 \S # other characters
2457 2456 ''', re.VERBOSE | re.DOTALL)
2458 2457 # 1. find the nearest identifier that comes before an unclosed
2459 2458 # parenthesis before the cursor
2460 2459 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
2461 2460 tokens = regexp.findall(self.text_until_cursor)
2462 2461 iterTokens = reversed(tokens); openPar = 0
2463 2462
2464 2463 for token in iterTokens:
2465 2464 if token == ')':
2466 2465 openPar -= 1
2467 2466 elif token == '(':
2468 2467 openPar += 1
2469 2468 if openPar > 0:
2470 2469 # found the last unclosed parenthesis
2471 2470 break
2472 2471 else:
2473 2472 return []
2474 2473 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
2475 2474 ids = []
2476 2475 isId = re.compile(r'\w+$').match
2477 2476
2478 2477 while True:
2479 2478 try:
2480 2479 ids.append(next(iterTokens))
2481 2480 if not isId(ids[-1]):
2482 2481 ids.pop(); break
2483 2482 if not next(iterTokens) == '.':
2484 2483 break
2485 2484 except StopIteration:
2486 2485 break
2487 2486
2488 2487         # Find all named arguments already assigned to, so as to avoid suggesting
2489 2488 # them again
2490 2489 usedNamedArgs = set()
2491 2490 par_level = -1
2492 2491 for token, next_token in zip(tokens, tokens[1:]):
2493 2492 if token == '(':
2494 2493 par_level += 1
2495 2494 elif token == ')':
2496 2495 par_level -= 1
2497 2496
2498 2497 if par_level != 0:
2499 2498 continue
2500 2499
2501 2500 if next_token != '=':
2502 2501 continue
2503 2502
2504 2503 usedNamedArgs.add(token)
2505 2504
2506 2505 argMatches = []
2507 2506 try:
2508 2507 callableObj = '.'.join(ids[::-1])
2509 2508 namedArgs = self._default_arguments(eval(callableObj,
2510 2509 self.namespace))
2511 2510
2512 2511 # Remove used named arguments from the list, no need to show twice
2513 2512 for namedArg in set(namedArgs) - usedNamedArgs:
2514 2513 if namedArg.startswith(text):
2515 2514 argMatches.append("%s=" %namedArg)
2516 2515 except:
2517 2516 pass
2518 2517
2519 2518 return argMatches
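
    # A minimal sketch (hedged): ``completer`` stands for a hypothetical
    # IPCompleter whose ``text_until_cursor`` ends with something like
    # ``plot(x, col`` and whose namespace holds a ``plot`` callable accepting a
    # ``color=`` keyword.
    #
    #     >>> completer.python_func_kw_matches('col')
    #     ['color=']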
2520 2519
2521 2520 @staticmethod
2522 2521 def _get_keys(obj: Any) -> List[Any]:
2523 2522 # Objects can define their own completions by defining an
2524 2523         # _ipython_key_completions_() method.
2525 2524 method = get_real_method(obj, '_ipython_key_completions_')
2526 2525 if method is not None:
2527 2526 return method()
2528 2527
2529 2528 # Special case some common in-memory dict-like types
2530 2529 if isinstance(obj, dict) or _safe_isinstance(obj, "pandas", "DataFrame"):
2531 2530 try:
2532 2531 return list(obj.keys())
2533 2532 except Exception:
2534 2533 return []
2535 2534 elif _safe_isinstance(obj, "pandas", "core", "indexing", "_LocIndexer"):
2536 2535 try:
2537 2536 return list(obj.obj.keys())
2538 2537 except Exception:
2539 2538 return []
2540 2539 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
2541 2540 _safe_isinstance(obj, 'numpy', 'void'):
2542 2541 return obj.dtype.names or []
2543 2542 return []
2544 2543
2545 2544 @context_matcher()
2546 2545 def dict_key_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2547 2546 """Match string keys in a dictionary, after e.g. ``foo[``."""
2548 2547 matches = self.dict_key_matches(context.token)
2549 2548 return _convert_matcher_v1_result_to_v2(
2550 2549 matches, type="dict key", suppress_if_matches=True
2551 2550 )
2552 2551
2553 2552 def dict_key_matches(self, text: str) -> List[str]:
2554 2553 """Match string keys in a dictionary, after e.g. ``foo[``.
2555 2554
2556 2555 .. deprecated:: 8.6
2557 2556 You can use :meth:`dict_key_matcher` instead.
2558 2557 """
2559 2558
2560 2559 # Short-circuit on closed dictionary (regular expression would
2561 2560 # not match anyway, but would take quite a while).
2562 2561 if self.text_until_cursor.strip().endswith("]"):
2563 2562 return []
2564 2563
2565 2564 match = DICT_MATCHER_REGEX.search(self.text_until_cursor)
2566 2565
2567 2566 if match is None:
2568 2567 return []
2569 2568
2570 2569 expr, prior_tuple_keys, key_prefix = match.groups()
2571 2570
2572 2571 obj = self._evaluate_expr(expr)
2573 2572
2574 2573 if obj is not_found:
2575 2574 return []
2576 2575
2577 2576 keys = self._get_keys(obj)
2578 2577 if not keys:
2579 2578 return keys
2580 2579
2581 2580 tuple_prefix = guarded_eval(
2582 2581 prior_tuple_keys,
2583 2582 EvaluationContext(
2584 2583 globals=self.global_namespace,
2585 2584 locals=self.namespace,
2586 2585 evaluation=self.evaluation, # type: ignore
2587 2586 in_subscript=True,
2588 2587 ),
2589 2588 )
2590 2589
2591 2590 closing_quote, token_offset, matches = match_dict_keys(
2592 2591 keys, key_prefix, self.splitter.delims, extra_prefix=tuple_prefix
2593 2592 )
2594 2593 if not matches:
2595 2594 return []
2596 2595
2597 2596 # get the cursor position of
2598 2597 # - the text being completed
2599 2598 # - the start of the key text
2600 2599 # - the start of the completion
2601 2600 text_start = len(self.text_until_cursor) - len(text)
2602 2601 if key_prefix:
2603 2602 key_start = match.start(3)
2604 2603 completion_start = key_start + token_offset
2605 2604 else:
2606 2605 key_start = completion_start = match.end()
2607 2606
2608 2607 # grab the leading prefix, to make sure all completions start with `text`
2609 2608 if text_start > key_start:
2610 2609 leading = ''
2611 2610 else:
2612 2611 leading = text[text_start:completion_start]
2613 2612
2614 2613 # append closing quote and bracket as appropriate
2615 2614 # this is *not* appropriate if the opening quote or bracket is outside
2616 2615 # the text given to this method, e.g. `d["""a\nt
2617 2616 can_close_quote = False
2618 2617 can_close_bracket = False
2619 2618
2620 2619 continuation = self.line_buffer[len(self.text_until_cursor) :].strip()
2621 2620
2622 2621 if continuation.startswith(closing_quote):
2623 2622 # do not close if already closed, e.g. `d['a<tab>'`
2624 2623 continuation = continuation[len(closing_quote) :]
2625 2624 else:
2626 2625 can_close_quote = True
2627 2626
2628 2627 continuation = continuation.strip()
2629 2628
2630 2629 # e.g. `pandas.DataFrame` has different tuple indexer behaviour,
2631 2630 # handling it is out of scope, so let's avoid appending suffixes.
2632 2631 has_known_tuple_handling = isinstance(obj, dict)
2633 2632
2634 2633 can_close_bracket = (
2635 2634 not continuation.startswith("]") and self.auto_close_dict_keys
2636 2635 )
2637 2636 can_close_tuple_item = (
2638 2637 not continuation.startswith(",")
2639 2638 and has_known_tuple_handling
2640 2639 and self.auto_close_dict_keys
2641 2640 )
2642 2641 can_close_quote = can_close_quote and self.auto_close_dict_keys
2643 2642
2644 2643         # fast path if a closing quote should be appended but no suffix is allowed
2645 2644 if not can_close_quote and not can_close_bracket and closing_quote:
2646 2645 return [leading + k for k in matches]
2647 2646
2648 2647 results = []
2649 2648
2650 2649 end_of_tuple_or_item = _DictKeyState.END_OF_TUPLE | _DictKeyState.END_OF_ITEM
2651 2650
2652 2651 for k, state_flag in matches.items():
2653 2652 result = leading + k
2654 2653 if can_close_quote and closing_quote:
2655 2654 result += closing_quote
2656 2655
2657 2656 if state_flag == end_of_tuple_or_item:
2658 2657 # We do not know which suffix to add,
2659 2658 # e.g. both tuple item and string
2660 2659 # match this item.
2661 2660 pass
2662 2661
2663 2662 if state_flag in end_of_tuple_or_item and can_close_bracket:
2664 2663 result += "]"
2665 2664 if state_flag == _DictKeyState.IN_TUPLE and can_close_tuple_item:
2666 2665 result += ", "
2667 2666 results.append(result)
2668 2667 return results
2669 2668
2670 2669 @context_matcher()
2671 2670 def unicode_name_matcher(self, context: CompletionContext):
2672 2671 """Same as :any:`unicode_name_matches`, but adopted to new Matcher API."""
2673 2672 fragment, matches = self.unicode_name_matches(context.text_until_cursor)
2674 2673 return _convert_matcher_v1_result_to_v2(
2675 2674 matches, type="unicode", fragment=fragment, suppress_if_matches=True
2676 2675 )
2677 2676
2678 2677 @staticmethod
2679 2678 def unicode_name_matches(text: str) -> Tuple[str, List[str]]:
2680 2679 """Match Latex-like syntax for unicode characters base
2681 2680 on the name of the character.
2682 2681
2683 2682 This does ``\\GREEK SMALL LETTER ETA`` -> ``Ξ·``
2684 2683
2685 2684         Works only on valid Python 3 identifiers, or on combining characters that
2686 2685         will combine to form a valid identifier.
2687 2686 """
2688 2687 slashpos = text.rfind('\\')
2689 2688 if slashpos > -1:
2690 2689 s = text[slashpos+1:]
2691 2690 try :
2692 2691 unic = unicodedata.lookup(s)
2693 2692 # allow combining chars
2694 2693 if ('a'+unic).isidentifier():
2695 2694 return '\\'+s,[unic]
2696 2695 except KeyError:
2697 2696 pass
2698 2697 return '', []
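
    # A minimal sketch of the expected behaviour (hedged; the output depends on
    # the unicodedata database shipped with Python):
    #
    #     >>> IPCompleter.unicode_name_matches("\\GREEK SMALL LETTER ALPHA")
    #     ('\\GREEK SMALL LETTER ALPHA', ['Ξ±'])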
2699 2698
2700 2699 @context_matcher()
2701 2700 def latex_name_matcher(self, context: CompletionContext):
2702 2701 """Match Latex syntax for unicode characters.
2703 2702
2704 2703 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
2705 2704 """
2706 2705 fragment, matches = self.latex_matches(context.text_until_cursor)
2707 2706 return _convert_matcher_v1_result_to_v2(
2708 2707 matches, type="latex", fragment=fragment, suppress_if_matches=True
2709 2708 )
2710 2709
2711 2710 def latex_matches(self, text: str) -> Tuple[str, Sequence[str]]:
2712 2711 """Match Latex syntax for unicode characters.
2713 2712
2714 2713 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
2715 2714
2716 2715 .. deprecated:: 8.6
2717 2716 You can use :meth:`latex_name_matcher` instead.
2718 2717 """
2719 2718 slashpos = text.rfind('\\')
2720 2719 if slashpos > -1:
2721 2720 s = text[slashpos:]
2722 2721 if s in latex_symbols:
2723 2722 # Try to complete a full latex symbol to unicode
2724 2723 # \\alpha -> Ξ±
2725 2724 return s, [latex_symbols[s]]
2726 2725 else:
2727 2726 # If a user has partially typed a latex symbol, give them
2728 2727 # a full list of options \al -> [\aleph, \alpha]
2729 2728 matches = [k for k in latex_symbols if k.startswith(s)]
2730 2729 if matches:
2731 2730 return s, matches
2732 2731 return '', ()
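
    # A minimal sketch (hedged): ``completer`` stands for a hypothetical
    # IPCompleter instance, and the candidate list for a partial symbol depends
    # on the ``latex_symbols`` table.
    #
    #     >>> completer.latex_matches("\\alpha")   # full symbol -> unicode
    #     ('\\alpha', ['Ξ±'])
    #     >>> completer.latex_matches("\\alp")     # partial -> candidate symbols
    #     ('\\alp', ['\\alpha', ...])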
2733 2732
2734 2733 @context_matcher()
2735 2734 def custom_completer_matcher(self, context):
2736 2735 """Dispatch custom completer.
2737 2736
2738 2737 If a match is found, suppresses all other matchers except for Jedi.
2739 2738 """
2740 2739 matches = self.dispatch_custom_completer(context.token) or []
2741 2740 result = _convert_matcher_v1_result_to_v2(
2742 2741 matches, type=_UNKNOWN_TYPE, suppress_if_matches=True
2743 2742 )
2744 2743 result["ordered"] = True
2745 2744 result["do_not_suppress"] = {_get_matcher_id(self._jedi_matcher)}
2746 2745 return result
2747 2746
2748 2747 def dispatch_custom_completer(self, text):
2749 2748 """
2750 2749 .. deprecated:: 8.6
2751 2750 You can use :meth:`custom_completer_matcher` instead.
2752 2751 """
2753 2752 if not self.custom_completers:
2754 2753 return
2755 2754
2756 2755 line = self.line_buffer
2757 2756 if not line.strip():
2758 2757 return None
2759 2758
2760 2759 # Create a little structure to pass all the relevant information about
2761 2760 # the current completion to any custom completer.
2762 2761 event = SimpleNamespace()
2763 2762 event.line = line
2764 2763 event.symbol = text
2765 2764 cmd = line.split(None,1)[0]
2766 2765 event.command = cmd
2767 2766 event.text_until_cursor = self.text_until_cursor
2768 2767
2769 2768 # for foo etc, try also to find completer for %foo
2770 2769 if not cmd.startswith(self.magic_escape):
2771 2770 try_magic = self.custom_completers.s_matches(
2772 2771 self.magic_escape + cmd)
2773 2772 else:
2774 2773 try_magic = []
2775 2774
2776 2775 for c in itertools.chain(self.custom_completers.s_matches(cmd),
2777 2776 try_magic,
2778 2777 self.custom_completers.flat_matches(self.text_until_cursor)):
2779 2778 try:
2780 2779 res = c(event)
2781 2780 if res:
2782 2781 # first, try case sensitive match
2783 2782 withcase = [r for r in res if r.startswith(text)]
2784 2783 if withcase:
2785 2784 return withcase
2786 2785 # if none, then case insensitive ones are ok too
2787 2786 text_low = text.lower()
2788 2787 return [r for r in res if r.lower().startswith(text_low)]
2789 2788 except TryNext:
2790 2789 pass
2791 2790 except KeyboardInterrupt:
2792 2791 """
2793 2792                 If a custom completer takes too long,
2794 2793 let keyboard interrupt abort and return nothing.
2795 2794 """
2796 2795 break
2797 2796
2798 2797 return None
2799 2798
2800 2799 def completions(self, text: str, offset: int)->Iterator[Completion]:
2801 2800 """
2802 2801 Returns an iterator over the possible completions
2803 2802
2804 2803 .. warning::
2805 2804
2806 2805 Unstable
2807 2806
2808 2807             This function is unstable; the API may change without warning.
2809 2808             It will also raise unless used in a proper context manager.
2810 2809
2811 2810 Parameters
2812 2811 ----------
2813 2812 text : str
2814 2813 Full text of the current input, multi line string.
2815 2814 offset : int
2816 2815 Integer representing the position of the cursor in ``text``. Offset
2817 2816 is 0-based indexed.
2818 2817
2819 2818 Yields
2820 2819 ------
2821 2820 Completion
2822 2821
2823 2822 Notes
2824 2823 -----
2825 2824             The cursor in a text can either be seen as being "in between"
2826 2825             characters or "on" a character, depending on the interface visible to
2827 2826             the user. For consistency, the cursor being "in between" characters X
2828 2827             and Y is equivalent to the cursor being "on" character Y, that is to say
2829 2828             the character the cursor is on is considered as being after the cursor.
2830 2829
2831 2830             Combining characters may span more than one position in the
2832 2831 text.
2833 2832
2834 2833 .. note::
2835 2834
2836 2835                 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
2837 2836                 fake Completion token to distinguish completions returned by Jedi
2838 2837                 from the usual IPython completions.
2839 2838
2840 2839 .. note::
2841 2840
2842 2841 Completions are not completely deduplicated yet. If identical
2843 2842                 completions are coming from different sources, this function does not
2844 2843 ensure that each completion object will only be present once.
2845 2844 """
2846 2845 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
2847 2846 "It may change without warnings. "
2848 2847 "Use in corresponding context manager.",
2849 2848 category=ProvisionalCompleterWarning, stacklevel=2)
2850 2849
2851 2850 seen = set()
2852 2851 profiler:Optional[cProfile.Profile]
2853 2852 try:
2854 2853 if self.profile_completions:
2855 2854 import cProfile
2856 2855 profiler = cProfile.Profile()
2857 2856 profiler.enable()
2858 2857 else:
2859 2858 profiler = None
2860 2859
2861 2860 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
2862 2861 if c and (c in seen):
2863 2862 continue
2864 2863 yield c
2865 2864 seen.add(c)
2866 2865 except KeyboardInterrupt:
2867 2866 """if completions take too long and users send keyboard interrupt,
2868 2867 do not crash and return ASAP. """
2869 2868 pass
2870 2869 finally:
2871 2870 if profiler is not None:
2872 2871 profiler.disable()
2873 2872 ensure_dir_exists(self.profiler_output_dir)
2874 2873 output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
2875 2874 print("Writing profiler output to", output_path)
2876 2875 profiler.dump_stats(output_path)
2877 2876
2878 2877 def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
2879 2878 """
2880 2879         Core completion method. Same signature as :any:`completions`, with the
2881 2880         extra ``_timeout`` parameter (in seconds).
2882 2881
2883 2882 Computing jedi's completion ``.type`` can be quite expensive (it is a
2884 2883 lazy property) and can require some warm-up, more warm up than just
2885 2884         computing the ``name`` of a completion. The warm-up can be:
2886 2885
2887 2886 - Long warm-up the first time a module is encountered after
2888 2887 install/update: actually build parse/inference tree.
2889 2888
2890 2889 - first time the module is encountered in a session: load tree from
2891 2890 disk.
2892 2891
2893 2892         We don't want to block completions for tens of seconds, so we give the
2894 2893         completer a "budget" of ``_timeout`` seconds per invocation to compute
2895 2894         completion types; the completions whose type has not yet been computed will
2896 2895         be marked as "unknown" and will have a chance to be computed on the next
2897 2896         round as things get cached.
2898 2897
2899 2898         Keep in mind that Jedi is not the only thing processing the completions, so
2900 2899         keep the timeout short-ish: if we take more than 0.3 seconds we still
2901 2900         have lots of processing to do.
2902 2901
2903 2902 """
2904 2903 deadline = time.monotonic() + _timeout
2905 2904
2906 2905 before = full_text[:offset]
2907 2906 cursor_line, cursor_column = position_to_cursor(full_text, offset)
2908 2907
2909 2908 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2910 2909
2911 2910 def is_non_jedi_result(
2912 2911 result: MatcherResult, identifier: str
2913 2912 ) -> TypeGuard[SimpleMatcherResult]:
2914 2913 return identifier != jedi_matcher_id
2915 2914
2916 2915 results = self._complete(
2917 2916 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column
2918 2917 )
2919 2918
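# Split the raw matcher results: the Jedi matcher is handled separately
# below, everything else is treated as a SimpleMatcherResult.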
2920 2919 non_jedi_results: Dict[str, SimpleMatcherResult] = {
2921 2920 identifier: result
2922 2921 for identifier, result in results.items()
2923 2922 if is_non_jedi_result(result, identifier)
2924 2923 }
2925 2924
2926 2925 jedi_matches = (
2927 2926 cast(_JediMatcherResult, results[jedi_matcher_id])["completions"]
2928 2927 if jedi_matcher_id in results
2929 2928 else ()
2930 2929 )
2931 2930
2932 2931 iter_jm = iter(jedi_matches)
2933 2932 if _timeout:
2934 2933 for jm in iter_jm:
2935 2934 try:
2936 2935 type_ = jm.type
2937 2936 except Exception:
2938 2937 if self.debug:
2939 2938 print("Error in Jedi getting type of ", jm)
2940 2939 type_ = None
2941 2940 delta = len(jm.name_with_symbols) - len(jm.complete)
2942 2941 if type_ == 'function':
2943 2942 signature = _make_signature(jm)
2944 2943 else:
2945 2944 signature = ''
2946 2945 yield Completion(start=offset - delta,
2947 2946 end=offset,
2948 2947 text=jm.name_with_symbols,
2949 2948 type=type_,
2950 2949 signature=signature,
2951 2950 _origin='jedi')
2952 2951
2953 2952 if time.monotonic() > deadline:
2954 2953 break
2955 2954
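# Budget exhausted (or _timeout was falsy): emit the remaining Jedi
# matches without resolving their type, which is the expensive part.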
2956 2955 for jm in iter_jm:
2957 2956 delta = len(jm.name_with_symbols) - len(jm.complete)
2958 2957 yield Completion(
2959 2958 start=offset - delta,
2960 2959 end=offset,
2961 2960 text=jm.name_with_symbols,
2962 2961 type=_UNKNOWN_TYPE, # don't compute type for speed
2963 2962 _origin="jedi",
2964 2963 signature="",
2965 2964 )
2966 2965
2967 2966 # TODO:
2968 2967 # Suppress this, right now just for debug.
2969 2968 if jedi_matches and non_jedi_results and self.debug:
2970 2969 some_start_offset = before.rfind(
2971 2970 next(iter(non_jedi_results.values()))["matched_fragment"]
2972 2971 )
2973 2972 yield Completion(
2974 2973 start=some_start_offset,
2975 2974 end=offset,
2976 2975 text="--jedi/ipython--",
2977 2976 _origin="debug",
2978 2977 type="none",
2979 2978 signature="",
2980 2979 )
2981 2980
2982 2981 ordered: List[Completion] = []
2983 2982 sortable: List[Completion] = []
2984 2983
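# Matchers that flag their result as "ordered" keep their own ordering
# and are emitted first; everything else is sorted before being yielded.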
2985 2984 for origin, result in non_jedi_results.items():
2986 2985 matched_text = result["matched_fragment"]
2987 2986 start_offset = before.rfind(matched_text)
2988 2987 is_ordered = result.get("ordered", False)
2989 2988 container = ordered if is_ordered else sortable
2990 2989
2991 2990 # I'm unsure if this is always true, so let's assert and see if it
2992 2991 # crashes
2993 2992 assert before.endswith(matched_text)
2994 2993
2995 2994 for simple_completion in result["completions"]:
2996 2995 completion = Completion(
2997 2996 start=start_offset,
2998 2997 end=offset,
2999 2998 text=simple_completion.text,
3000 2999 _origin=origin,
3001 3000 signature="",
3002 3001 type=simple_completion.type or _UNKNOWN_TYPE,
3003 3002 )
3004 3003 container.append(completion)
3005 3004
3006 3005 yield from list(self._deduplicate(ordered + self._sort(sortable)))[
3007 3006 :MATCHES_LIMIT
3008 3007 ]
3009 3008
3010 3009 def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
3011 3010 """Find completions for the given text and line context.
3012 3011
3013 3012 Note that both the text and the line_buffer are optional, but at least
3014 3013 one of them must be given.
3015 3014
3016 3015 Parameters
3017 3016 ----------
3018 3017 text : string, optional
3019 3018 Text to perform the completion on. If not given, the line buffer
3020 3019 is split using the instance's CompletionSplitter object.
3021 3020 line_buffer : string, optional
3022 3021 If not given, the completer attempts to obtain the current line
3023 3022 buffer via readline. This keyword allows clients that are
3024 3023 requesting text completions in non-readline contexts to inform
3025 3024 the completer of the entire text.
3026 3025 cursor_pos : int, optional
3027 3026 Index of the cursor in the full line buffer. Should be provided by
3028 3027 remote frontends where the kernel has no access to frontend state.
3029 3028
3030 3029 Returns
3031 3030 -------
3032 3031 Tuple of two items:
3033 3032 text : str
3034 3033 Text that was actually used in the completion.
3035 3034 matches : list
3036 3035 A list of completion matches.
3037 3036
3038 3037 Notes
3039 3038 -----
3040 3039 This API is likely to be deprecated and replaced by
3041 3040 :any:`IPCompleter.completions` in the future.
3042 3041
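Examples
--------
A hypothetical interactive session (assuming ``ip = get_ipython()``;
the exact matches depend on the environment)::

    text, matches = ip.Completer.complete(line_buffer="import o", cursor_pos=8)
    # `text` is the token that was completed ("o" here); `matches` is a
    # list of candidate strings such as "os", "operator", ...
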
3043 3042 """
3044 3043 warnings.warn('`Completer.complete` is pending deprecation since '
3045 3044 'IPython 6.0 and will be replaced by `Completer.completions`.',
3046 3045 PendingDeprecationWarning)
3047 3046 # potential todo: FOLD the 3rd throw-away argument of _complete
3048 3047 # into the first two.
3049 3048 # TODO: Q: does the above refer to jedi completions (i.e. 0-indexed?)
3050 3049 # TODO: should we deprecate now, or does it stay?
3051 3050
3052 3051 results = self._complete(
3053 3052 line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0
3054 3053 )
3055 3054
3056 3055 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
3057 3056
3058 3057 return self._arrange_and_extract(
3059 3058 results,
3060 3059 # TODO: can we confirm that excluding Jedi here was a deliberate choice in the previous version?
3061 3060 skip_matchers={jedi_matcher_id},
3062 3061 # this API does not support different start/end positions (fragments of token).
3063 3062 abort_if_offset_changes=True,
3064 3063 )
3065 3064
3066 3065 def _arrange_and_extract(
3067 3066 self,
3068 3067 results: Dict[str, MatcherResult],
3069 3068 skip_matchers: Set[str],
3070 3069 abort_if_offset_changes: bool,
3071 3070 ):
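"""Collect completion texts out of matcher results.

Skips matchers listed in ``skip_matchers`` and, when
``abort_if_offset_changes`` is true, stops at the first result whose
matched fragment differs from the first one seen. Returns the most
recent matched fragment together with the de-duplicated completion
texts ("ordered" results first, the rest sorted).
"""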
3072 3071 sortable: List[AnyMatcherCompletion] = []
3073 3072 ordered: List[AnyMatcherCompletion] = []
3074 3073 most_recent_fragment = None
3075 3074 for identifier, result in results.items():
3076 3075 if identifier in skip_matchers:
3077 3076 continue
3078 3077 if not result["completions"]:
3079 3078 continue
3080 3079 if not most_recent_fragment:
3081 3080 most_recent_fragment = result["matched_fragment"]
3082 3081 if (
3083 3082 abort_if_offset_changes
3084 3083 and result["matched_fragment"] != most_recent_fragment
3085 3084 ):
3086 3085 break
3087 3086 if result.get("ordered", False):
3088 3087 ordered.extend(result["completions"])
3089 3088 else:
3090 3089 sortable.extend(result["completions"])
3091 3090
3092 3091 if not most_recent_fragment:
3093 3092 most_recent_fragment = "" # to satisfy typechecker (and just in case)
3094 3093
3095 3094 return most_recent_fragment, [
3096 3095 m.text for m in self._deduplicate(ordered + self._sort(sortable))
3097 3096 ]
3098 3097
3099 3098 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
3100 3099 full_text=None) -> _CompleteResult:
3101 3100 """
3102 3101 Like complete but can also return raw Jedi completions as well as the
3103 3102 origin of the completion text. This could (and should) be made much
3104 3103 cleaner, but that will be simpler once we drop the old (and stateful)
3105 3104 :any:`complete` API.
3106 3105
3107 3106 With the current provisional API, cursor_pos acts (depending on the
3108 3107 caller) either as the offset in ``text`` or ``line_buffer``, or as the
3109 3108 ``column`` when passing multiline strings. This could/should be renamed,
3110 3109 but that would add extra noise.
3111 3110
3112 3111 Parameters
3113 3112 ----------
3114 3113 cursor_line
3115 3114 Index of the line the cursor is on. 0 indexed.
3116 3115 cursor_pos
3117 3116 Position of the cursor in the current line/line_buffer/text. 0
3118 3117 indexed.
3119 3118 line_buffer : optional, str
3120 3119 The current line the cursor is in; this is mostly kept for the legacy
3121 3120 reason that readline could only give us the single current line.
3122 3121 Prefer `full_text`.
3123 3122 text : str
3124 3123 The current "token" the cursor is in, mostly also for historical
3125 3124 reasons. as the completer would trigger only after the current line
3126 3125 was parsed.
3127 3126 full_text : str
3128 3127 Full text of the current cell.
3129 3128
3130 3129 Returns
3131 3130 -------
3132 3131 An ordered dictionary where keys are identifiers of completion
3133 3132 matchers and values are ``MatcherResult``s.
3134 3133 """
3135 3134
3136 3135 # if the cursor position isn't given, the only sane assumption we can
3137 3136 # make is that it's at the end of the line (the common case)
3138 3137 if cursor_pos is None:
3139 3138 cursor_pos = len(line_buffer) if text is None else len(text)
3140 3139
3141 3140 if self.use_main_ns:
3142 3141 self.namespace = __main__.__dict__
3143 3142
3144 3143 # if text is either None or an empty string, rely on the line buffer
3145 3144 if (not line_buffer) and full_text:
3146 3145 line_buffer = full_text.split('\n')[cursor_line]
3147 3146 if not text: # issue #11508: check line_buffer before calling split_line
3148 3147 text = (
3149 3148 self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ""
3150 3149 )
3151 3150
3152 3151 # If no line buffer is given, assume the input text is all there was
3153 3152 if line_buffer is None:
3154 3153 line_buffer = text
3155 3154
3156 3155 # deprecated - do not use `line_buffer` in new code.
3157 3156 self.line_buffer = line_buffer
3158 3157 self.text_until_cursor = self.line_buffer[:cursor_pos]
3159 3158
3160 3159 if not full_text:
3161 3160 full_text = line_buffer
3162 3161
3163 3162 context = CompletionContext(
3164 3163 full_text=full_text,
3165 3164 cursor_position=cursor_pos,
3166 3165 cursor_line=cursor_line,
3167 3166 token=text,
3168 3167 limit=MATCHES_LIMIT,
3169 3168 )
3170 3169
3171 3170 # Start with a clean slate of completions
3172 3171 results: Dict[str, MatcherResult] = {}
3173 3172
3174 3173 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
3175 3174
3176 3175 suppressed_matchers: Set[str] = set()
3177 3176
3178 3177 matchers = {
3179 3178 _get_matcher_id(matcher): matcher
3180 3179 for matcher in sorted(
3181 3180 self.matchers, key=_get_matcher_priority, reverse=True
3182 3181 )
3183 3182 }
3184 3183
3185 3184 for matcher_id, matcher in matchers.items():
3186 3185 matcher_id = _get_matcher_id(matcher)
3187 3186
3188 3187 if matcher_id in self.disable_matchers:
3189 3188 continue
3190 3189
3191 3190 if matcher_id in results:
3192 3191 warnings.warn(f"Duplicate matcher ID: {matcher_id}.")
3193 3192
3194 3193 if matcher_id in suppressed_matchers:
3195 3194 continue
3196 3195
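# Dispatch on the matcher API version: v1 matchers take the bare token
# text and return a list of strings (converted to a v2 result here),
# while v2 matchers take the full CompletionContext and return a
# MatcherResult themselves.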
3197 3196 result: MatcherResult
3198 3197 try:
3199 3198 if _is_matcher_v1(matcher):
3200 3199 result = _convert_matcher_v1_result_to_v2(
3201 3200 matcher(text), type=_UNKNOWN_TYPE
3202 3201 )
3203 3202 elif _is_matcher_v2(matcher):
3204 3203 result = matcher(context)
3205 3204 else:
3206 3205 api_version = _get_matcher_api_version(matcher)
3207 3206 raise ValueError(f"Unsupported API version {api_version}")
3208 3207 except:
3209 3208 # Show the ugly traceback if the matcher causes an
3210 3209 # exception, but do NOT crash the kernel!
3211 3210 sys.excepthook(*sys.exc_info())
3212 3211 continue
3213 3212
3214 3213 # set default value for matched fragment if the matcher did not provide one.
3215 3214 result["matched_fragment"] = result.get("matched_fragment", context.token)
3216 3215
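# A matcher may ask to suppress the results of other matchers; the
# suppress_competing_matchers configuration can force or veto this, and
# only the first (highest-priority) matcher to request suppression takes
# effect, since later requests are ignored once suppression is active.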
3217 3216 if not suppressed_matchers:
3218 3217 suppression_recommended: Union[bool, Set[str]] = result.get(
3219 3218 "suppress", False
3220 3219 )
3221 3220
3222 3221 suppression_config = (
3223 3222 self.suppress_competing_matchers.get(matcher_id, None)
3224 3223 if isinstance(self.suppress_competing_matchers, dict)
3225 3224 else self.suppress_competing_matchers
3226 3225 )
3227 3226 should_suppress = (
3228 3227 (suppression_config is True)
3229 3228 or (suppression_recommended and (suppression_config is not False))
3230 3229 ) and has_any_completions(result)
3231 3230
3232 3231 if should_suppress:
3233 3232 suppression_exceptions: Set[str] = result.get(
3234 3233 "do_not_suppress", set()
3235 3234 )
3236 3235 if isinstance(suppression_recommended, Iterable):
3237 3236 to_suppress = set(suppression_recommended)
3238 3237 else:
3239 3238 to_suppress = set(matchers)
3240 3239 suppressed_matchers = to_suppress - suppression_exceptions
3241 3240
3242 3241 new_results = {}
3243 3242 for previous_matcher_id, previous_result in results.items():
3244 3243 if previous_matcher_id not in suppressed_matchers:
3245 3244 new_results[previous_matcher_id] = previous_result
3246 3245 results = new_results
3247 3246
3248 3247 results[matcher_id] = result
3249 3248
3250 3249 _, matches = self._arrange_and_extract(
3251 3250 results,
3252 3251 # TODO Jedi completions not included in legacy stateful API; was this deliberate or an omission?
3253 3252 # if it was an omission, we can remove the filtering step, otherwise remove this comment.
3254 3253 skip_matchers={jedi_matcher_id},
3255 3254 abort_if_offset_changes=False,
3256 3255 )
3257 3256
3258 3257 # populate legacy stateful API
3259 3258 self.matches = matches
3260 3259
3261 3260 return results
3262 3261
3263 3262 @staticmethod
3264 3263 def _deduplicate(
3265 3264 matches: Sequence[AnyCompletion],
3266 3265 ) -> Iterable[AnyCompletion]:
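"""De-duplicate completions by text.

A later duplicate only replaces an earlier entry when that entry's
type is still ``_UNKNOWN_TYPE``, so known types are preserved.
"""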
3267 3266 filtered_matches: Dict[str, AnyCompletion] = {}
3268 3267 for match in matches:
3269 3268 text = match.text
3270 3269 if (
3271 3270 text not in filtered_matches
3272 3271 or filtered_matches[text].type == _UNKNOWN_TYPE
3273 3272 ):
3274 3273 filtered_matches[text] = match
3275 3274
3276 3275 return filtered_matches.values()
3277 3276
3278 3277 @staticmethod
3279 3278 def _sort(matches: Sequence[AnyCompletion]):
3280 3279 return sorted(matches, key=lambda x: completions_sorting_key(x.text))
3281 3280
3282 3281 @context_matcher()
3283 3282 def fwd_unicode_matcher(self, context: CompletionContext):
3284 3283 """Same as :any:`fwd_unicode_match`, but adopted to new Matcher API."""
3285 3284 # TODO: use `context.limit` to terminate early once we matched the maximum
3286 3285 # number that will be used downstream; can be added as an optional to
3287 3286 # `fwd_unicode_match(text: str, limit: int = None)` or we could re-implement here.
3288 3287 fragment, matches = self.fwd_unicode_match(context.text_until_cursor)
3289 3288 return _convert_matcher_v1_result_to_v2(
3290 3289 matches, type="unicode", fragment=fragment, suppress_if_matches=True
3291 3290 )
3292 3291
3293 3292 def fwd_unicode_match(self, text: str) -> Tuple[str, Sequence[str]]:
3294 3293 """
3295 3294 Forward match a string starting with a backslash with a list of
3296 3295 potential Unicode completions.
3297 3296
3298 3297 Will compute the list of Unicode character names on first call and cache it.
3299 3298
3300 3299 .. deprecated:: 8.6
3301 3300 You can use :meth:`fwd_unicode_matcher` instead.
3302 3301
3303 3302 Returns
3304 3303 -------
3305 3304 A tuple with:
3306 3305 - the matched text (empty if no matches)
3307 3306 - a list of potential completions (an empty tuple if there are none)
3308 3307 """
3309 3308 # TODO: self.unicode_names is a list of ~100k elements that we traverse on each call.
3310 3309 # We could do a faster match using a Trie.
3311 3310
3312 3311 # Using pygtrie, the following seems to work:
3313 3312
3314 3313 # s = PrefixSet()
3315 3314
3316 3315 # for c in range(0,0x10FFFF + 1):
3317 3316 # try:
3318 3317 # s.add(unicodedata.name(chr(c)))
3319 3318 # except ValueError:
3320 3319 # pass
3321 3320 # [''.join(k) for k in s.iter(prefix)]
3322 3321
3323 3322 # But this needs to be timed, and it adds an extra dependency.
3324 3323
3325 3324 slashpos = text.rfind('\\')
3326 3325 # if the text contains a backslash
3327 3326 if slashpos > -1:
3328 3327 # PERF: It's important that we don't access self._unicode_names
3329 3328 # until we're inside this if-block. _unicode_names is lazily
3330 3329 # initialized, and it takes a user-noticeable amount of time to
3331 3330 # initialize it, so we don't want to initialize it unless we're
3332 3331 # actually going to use it.
3333 3332 s = text[slashpos + 1 :]
3334 3333 sup = s.upper()
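# Try increasingly permissive matches: name prefix first, then substring,
# then "all words present" (e.g. "\arrow left" can match LEFTWARDS ARROW).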
3335 3334 candidates = [x for x in self.unicode_names if x.startswith(sup)]
3336 3335 if candidates:
3337 3336 return s, candidates
3338 3337 candidates = [x for x in self.unicode_names if sup in x]
3339 3338 if candidates:
3340 3339 return s, candidates
3341 3340 splitsup = sup.split(" ")
3342 3341 candidates = [
3343 3342 x for x in self.unicode_names if all(u in x for u in splitsup)
3344 3343 ]
3345 3344 if candidates:
3346 3345 return s, candidates
3347 3346
3348 3347 return "", ()
3349 3348
3350 3349 # if the text does not contain a backslash
3351 3350 else:
3352 3351 return '', ()
3353 3352
3354 3353 @property
3355 3354 def unicode_names(self) -> List[str]:
3356 3355 """List of names of unicode code points that can be completed.
3357 3356
3358 3357 The list is lazily initialized on first access.
3359 3358 """
3360 3359 if self._unicode_names is None:
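# Computed once per process; _unicode_name_compute only scans the
# code-point ranges listed in _UNICODE_RANGES rather than the full
# 0..0x10FFFF space.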
3367 3366 self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
3368 3367
3369 3368 return self._unicode_names
3370 3369
3371 3370 def _unicode_name_compute(ranges: List[Tuple[int, int]]) -> List[str]:
3372 3371 names = []
3373 3372 for start, stop in ranges:
3374 3373 for c in range(start, stop):
3375 3374 try:
3376 3375 names.append(unicodedata.name(chr(c)))
3377 3376 except ValueError:
3378 3377 pass
3379 3378 return names