##// END OF EJS Templates
Implement guarded evaluation, replace greedy, implement:...
krassowski -
Show More
This diff has been collapsed as it changes many lines, (541 lines changed) Show them Hide them
@@ -0,0 +1,541 b''
1 from typing import Callable, Protocol, Set, Tuple, NamedTuple, Literal, Union
2 import collections
3 import sys
4 import ast
5 import types
6 from functools import cached_property
7 from dataclasses import dataclass, field
8
9
class HasGetItem(Protocol):
    """Structural type for objects which define `__getitem__`."""
    def __getitem__(self, key) -> None: ...
12
13
class InstancesHaveGetItem(Protocol):
    """Structural type for callables whose instances define `__getitem__`."""
    def __call__(self) -> HasGetItem: ...
16
17
class HasGetAttr(Protocol):
    """Structural type for objects which define `__getattr__`."""
    def __getattr__(self, key) -> None: ...
20
21
class DoesNotHaveGetAttr(Protocol):
    """Structural type for objects which do not define `__getattr__`."""
    pass
24
25 # By default `__getattr__` is not explicitly implemented on most objects
26 MayHaveGetattr = Union[HasGetAttr, DoesNotHaveGetAttr]
27
28
def unbind_method(func: Callable) -> Union[Callable, None]:
    """Get unbound method for given bound method.

    Parameters
    ----------
    func
        A callable, possibly a bound method.

    Returns
    -------
    The class-level function backing ``func`` when ``func`` is a bound
    method whose name is not shadowed in the owner's instance ``__dict__``;
    ``None`` otherwise.
    """
    owner = getattr(func, '__self__', None)
    owner_class = type(owner)
    name = getattr(func, '__name__', None)
    instance_dict_overrides = getattr(owner, '__dict__', None)
    # Only unbind when the method actually resolves on the class: if the
    # instance `__dict__` shadows `name`, the class-level function is not
    # what would run, so we must not vouch for it.
    if (
        owner is not None
        and name
        and name not in (instance_dict_overrides or {})
    ):
        return getattr(owner_class, name)
    return None
52
53
@dataclass
class EvaluationPolicy:
    """Definition of what can be accessed and called during evaluation.

    All permissions default to the most restrictive setting (off/empty).
    """
    # namespace access toggles
    allow_locals_access: bool = False
    allow_globals_access: bool = False
    # `__getitem__` / `__getattr__` access toggles
    allow_item_access: bool = False
    allow_attr_access: bool = False
    allow_builtins_access: bool = False
    # call permissions: either allow everything, or only an explicit allow-list
    allow_any_calls: bool = False
    allowed_calls: Set[Callable] = field(default_factory=set)

    def can_get_item(self, value, item):
        """Return True if subscripting ``value[item]`` is permitted."""
        return self.allow_item_access

    def can_get_attr(self, value, attr):
        """Return True if attribute access ``value.attr`` is permitted."""
        return self.allow_attr_access

    def can_call(self, func):
        """Return True if calling ``func`` is permitted.

        A bound method is also accepted when the underlying class-level
        function is on the allow-list.
        """
        if self.allow_any_calls:
            return True

        if func in self.allowed_calls:
            return True

        owner_method = unbind_method(func)
        if owner_method and owner_method in self.allowed_calls:
            return True

        # no rule matched: explicitly refuse
        return False
80
def has_original_dunder_external(value, module_name, access_path, method_name):
    """Check whether ``value`` keeps the original dunder of an external type.

    The external type is identified by ``module_name`` plus an attribute
    ``access_path`` within that module (e.g. ``('core', 'indexing',
    '_iLocIndexer')``).

    Returns True when ``value`` is exactly of that type, or is an instance
    of it whose class did not override ``method_name``; False otherwise.
    A module that was never imported cannot have produced the value, so
    that case is False without importing anything.
    """
    if module_name not in sys.modules:
        # the module is not loaded, so `value` cannot be an instance
        # of a type defined there
        return False
    try:
        member_type = sys.modules[module_name]
        for attr in access_path:
            member_type = getattr(member_type, attr)
    except AttributeError:
        # the access path does not resolve in this module version
        return False
    value_type = type(value)
    if value_type == member_type:
        # exact type match: the dunder is necessarily the original one
        return True
    if isinstance(value, member_type):
        method = getattr(value_type, method_name, None)
        member_method = getattr(member_type, method_name, None)
        if member_method == method:
            return True
    return False
98
99
def has_original_dunder(
    value,
    allowed_types,
    allowed_methods,
    allowed_external,
    method_name
):
    """Tri-state check that ``value``'s ``method_name`` dunder is trusted.

    Returns True when trusted, False when present but not trusted, and
    None when the dunder does not exist at all on the value's type.

    Python ignores instance-level ``__getattr__``/``__getitem__``, so
    inspecting the class is sufficient.
    """
    klass = type(value)

    # An exact type match means the dunder is necessarily the original one.
    if klass in allowed_types:
        return True

    dunder = getattr(klass, method_name, None)
    if not dunder:
        # tri-state: distinguish "missing" (None) from "untrusted" (False)
        return None

    if dunder in allowed_methods:
        return True

    return any(
        has_original_dunder_external(value, module_name, access_path, method_name)
        for module_name, *access_path in allowed_external
    )
128
129
@dataclass
class SelectivePolicy(EvaluationPolicy):
    """Policy granting access only when a value's relevant dunder is the
    original, unmodified implementation of an allow-listed type.

    ``*_external`` sets hold ``(module_name, *attribute_path)`` tuples for
    third-party types that should not be imported eagerly.
    """
    # types whose `__getitem__` is trusted
    allowed_getitem: Set[HasGetItem] = field(default_factory=set)
    allowed_getitem_external: Set[Tuple[str, ...]] = field(default_factory=set)
    # types whose `__getattr__`/`__getattribute__` are trusted
    allowed_getattr: Set[MayHaveGetattr] = field(default_factory=set)
    allowed_getattr_external: Set[Tuple[str, ...]] = field(default_factory=set)

    def can_get_attr(self, value, attr):
        """Allow attribute access only if neither `__getattribute__` nor
        `__getattr__` was modified relative to an allow-listed type."""
        has_original_attribute = has_original_dunder(
            value,
            allowed_types=self.allowed_getattr,
            allowed_methods=self._getattribute_methods,
            allowed_external=self.allowed_getattr_external,
            method_name='__getattribute__'
        )
        has_original_attr = has_original_dunder(
            value,
            allowed_types=self.allowed_getattr,
            allowed_methods=self._getattr_methods,
            allowed_external=self.allowed_getattr_external,
            method_name='__getattr__'
        )
        # Many objects do not have `__getattr__` at all (tri-state None),
        # this is fine as long as `__getattribute__` is trusted
        if has_original_attr is None and has_original_attribute:
            return True

        # Accept objects without modifications to `__getattr__` and `__getattribute__`
        return has_original_attr and has_original_attribute

    def get_attr(self, value, attr):
        """Return ``getattr(value, attr)`` if allowed; None otherwise."""
        if self.can_get_attr(value, attr):
            return getattr(value, attr)


    def can_get_item(self, value, item):
        """Allow accessing `__getitem__` of allow-listed instances if it was not modified."""
        return has_original_dunder(
            value,
            allowed_types=self.allowed_getitem,
            allowed_methods=self._getitem_methods,
            allowed_external=self.allowed_getitem_external,
            method_name='__getitem__'
        )

    @cached_property
    def _getitem_methods(self) -> Set[Callable]:
        # cached: the allow-list is not expected to change after creation
        return self._safe_get_methods(
            self.allowed_getitem,
            '__getitem__'
        )

    @cached_property
    def _getattr_methods(self) -> Set[Callable]:
        return self._safe_get_methods(
            self.allowed_getattr,
            '__getattr__'
        )

    @cached_property
    def _getattribute_methods(self) -> Set[Callable]:
        return self._safe_get_methods(
            self.allowed_getattr,
            '__getattribute__'
        )

    def _safe_get_methods(self, classes, name) -> Set[Callable]:
        """Collect the `name` method from each class, skipping classes
        which do not define it."""
        return {
            method
            for class_ in classes
            for method in [getattr(class_, name, None)]
            if method
        }
202
203
class DummyNamedTuple(NamedTuple):
    """Stands in for all named tuples on the getitem allow-list: NamedTuple
    subclasses share a single `__getitem__` (see test asserting this)."""
    pass
206
207
class EvaluationContext(NamedTuple):
    """Dictates how (and in which namespaces) guarded evaluation proceeds."""
    # local namespace used for name resolution
    locals_: dict
    # global namespace used for name resolution
    globals_: dict
    # note: the 'limitted' (sic) spelling is part of the accepted literals
    evaluation: Literal['forbidden', 'minimal', 'limitted', 'unsafe', 'dangerous'] = 'forbidden'
    # True when the code being evaluated is the inside of a subscript,
    # which enables bare slice syntax such as `:` or `1:2`
    in_subscript: bool = False
213
214
class IdentitySubscript:
    """Returns the key itself when subscripted; neutralizes `__getitem__`."""
    def __getitem__(self, key):
        return key

# Sentinel instance and marker name used by `guarded_eval` to wrap bare
# subscript code (e.g. `:, "a"`) into a no-op subscript expression.
IDENTITY_SUBSCRIPT = IdentitySubscript()
SUBSCRIPT_MARKER = '__SUBSCRIPT_SENTINEL__'
221
class GuardRejection(ValueError):
    """Raised when the evaluation policy rejects an access or a call."""
    pass
224
225
def guarded_eval(
    code: str,
    context: EvaluationContext
):
    """Evaluate ``code`` within ``context``, honouring its evaluation mode.

    Raises ``GuardRejection`` in 'forbidden' mode; delegates to the built-in
    ``eval`` in 'dangerous' mode; otherwise parses the code and walks the
    AST via ``eval_node`` which enforces the policy guards.
    """
    if context.evaluation == 'forbidden':
        raise GuardRejection('Forbidden mode')

    # note: `ast.literal_eval` is not suitable here as it does not implement
    # getitem at all, failing even on simple expressions like `[0][1]`

    if context.in_subscript:
        # The `:` slice shorthand is only valid inside subscripts, so the
        # AST parser is tricked into seeing one: the code is wrapped in a
        # subscript on a sentinel whose `__getitem__` returns the key
        # unchanged, making the wrapping transparent.
        if not code:
            return tuple()
        scope = dict(context.locals_)
        scope[SUBSCRIPT_MARKER] = IDENTITY_SUBSCRIPT
        code = SUBSCRIPT_MARKER + '[' + code + ']'
        context = context._replace(locals_=scope)

    if context.evaluation == 'dangerous':
        return eval(code, context.globals_, context.locals_)

    expression = ast.parse(code, mode='eval')

    return eval_node(expression, context)
259
def eval_node(node: Union[ast.AST, None], context: EvaluationContext):
    """
    Evaluate AST node in provided context.

    Applies evaluation restrictions defined in the context.

    Calls with positional arguments are supported (subject to the call
    policy); calls with keyword arguments are not evaluated.

    Does not evaluate actions which always have side effects:
    - class definitions (`class sth: ...`)
    - function definitions (`def sth: ...`)
    - variable assignments (`x = 1`)
    - augmented assignments (`x += 1`)
    - deletions (`del x`)

    Does not evaluate operations which do not return values:
    - assertions (`assert x`)
    - pass (`pass`)
    - imports (`import x`)
    - control flow
      - conditionals (`if x:`) except for ternary IfExp (`a if x else b`)
      - loops (`for` and `while`)
      - exception handling

    Raises
    ------
    GuardRejection
        If the active policy does not allow the requested access or call.
    ValueError
        If the node (or operator) type is not handled.
    """
    policy = EVALUATION_POLICIES[context.evaluation]
    if node is None:
        return None
    if isinstance(node, ast.Expression):
        return eval_node(node.body, context)
    if isinstance(node, ast.BinOp):
        # TODO: add guards for values overriding operator dunders
        left = eval_node(node.left, context)
        right = eval_node(node.right, context)
        if isinstance(node.op, ast.Add):
            return left + right
        if isinstance(node.op, ast.Sub):
            return left - right
        if isinstance(node.op, ast.Mult):
            return left * right
        if isinstance(node.op, ast.Div):
            return left / right
        if isinstance(node.op, ast.FloorDiv):
            return left // right
        if isinstance(node.op, ast.Mod):
            return left % right
        if isinstance(node.op, ast.Pow):
            return left ** right
        if isinstance(node.op, ast.LShift):
            return left << right
        if isinstance(node.op, ast.RShift):
            return left >> right
        if isinstance(node.op, ast.BitOr):
            return left | right
        if isinstance(node.op, ast.BitXor):
            return left ^ right
        if isinstance(node.op, ast.BitAnd):
            return left & right
        if isinstance(node.op, ast.MatMult):
            return left @ right
    if isinstance(node, ast.Constant):
        return node.value
    if isinstance(node, ast.Index):
        # `ast.Index` is only produced on Python < 3.9; kept for compatibility
        return eval_node(node.value, context)
    if isinstance(node, ast.Tuple):
        return tuple(
            eval_node(e, context)
            for e in node.elts
        )
    if isinstance(node, ast.List):
        return [
            eval_node(e, context)
            for e in node.elts
        ]
    if isinstance(node, ast.Set):
        return {
            eval_node(e, context)
            for e in node.elts
        }
    if isinstance(node, ast.Dict):
        return dict(zip(
            [eval_node(k, context) for k in node.keys],
            [eval_node(v, context) for v in node.values]
        ))
    if isinstance(node, ast.Slice):
        return slice(
            eval_node(node.lower, context),
            eval_node(node.upper, context),
            eval_node(node.step, context)
        )
    if isinstance(node, ast.ExtSlice):
        # `ast.ExtSlice` is only produced on Python < 3.9; kept for compatibility
        return tuple([
            eval_node(dim, context)
            for dim in node.dims
        ])
    if isinstance(node, ast.UnaryOp):
        # TODO: add guards for values overriding operator dunders
        value = eval_node(node.operand, context)
        if isinstance(node.op, ast.USub):
            return -value
        if isinstance(node.op, ast.UAdd):
            return +value
        if isinstance(node.op, ast.Invert):
            return ~value
        if isinstance(node.op, ast.Not):
            return not value
        raise ValueError('Unhandled unary operation:', node.op)
    if isinstance(node, ast.Subscript):
        value = eval_node(node.value, context)
        slice_ = eval_node(node.slice, context)
        if policy.can_get_item(value, slice_):
            return value[slice_]
        raise GuardRejection(
            'Subscript access (`__getitem__`) for',
            type(value),  # not joined to avoid calling `repr`
            f' not allowed in {context.evaluation} mode'
        )
    if isinstance(node, ast.Name):
        if policy.allow_locals_access and node.id in context.locals_:
            return context.locals_[node.id]
        if policy.allow_globals_access and node.id in context.globals_:
            return context.globals_[node.id]
        if policy.allow_builtins_access and node.id in __builtins__:
            return __builtins__[node.id]
        if not policy.allow_globals_access and not policy.allow_locals_access:
            raise GuardRejection(
                f'Namespace access not allowed in {context.evaluation} mode'
            )
        else:
            raise NameError(f'{node.id} not found in locals nor globals')
    if isinstance(node, ast.Attribute):
        value = eval_node(node.value, context)
        if policy.can_get_attr(value, node.attr):
            return getattr(value, node.attr)
        raise GuardRejection(
            'Attribute access (`__getattr__`) for',
            type(value),  # not joined to avoid calling `repr`
            f'not allowed in {context.evaluation} mode'
        )
    if isinstance(node, ast.IfExp):
        test = eval_node(node.test, context)
        if test:
            return eval_node(node.body, context)
        else:
            return eval_node(node.orelse, context)
    if isinstance(node, ast.Call):
        func = eval_node(node.func, context)
        # keyword arguments are not supported, hence never evaluated
        if policy.can_call(func) and not node.keywords:
            args = [
                eval_node(arg, context)
                for arg in node.args
            ]
            return func(*args)
        raise GuardRejection(
            'Call for',
            func,  # not joined to avoid calling `repr`
            f'not allowed in {context.evaluation} mode'
        )
    raise ValueError('Unhandled node', node)
419
420
# `(module name, *attribute path)` tuples identifying external types whose
# `__getitem__` is trusted; only consulted when the module is already in
# `sys.modules` (see `has_original_dunder_external`), so nothing is imported.
SUPPORTED_EXTERNAL_GETITEM = {
    ('pandas', 'core', 'indexing', '_iLocIndexer'),
    ('pandas', 'core', 'indexing', '_LocIndexer'),
    ('pandas', 'DataFrame'),
    ('pandas', 'Series'),
    ('numpy', 'ndarray'),
    ('numpy', 'void')
}

# Built-in and stdlib container types with a trusted `__getitem__`.
BUILTIN_GETITEM = {
    dict,
    str,
    bytes,
    list,
    tuple,
    collections.defaultdict,
    collections.deque,
    collections.OrderedDict,
    collections.ChainMap,
    collections.UserDict,
    collections.UserList,
    collections.UserString,
    DummyNamedTuple,
    IdentitySubscript
}
446
447
448 def _list_methods(cls, source=None):
449 """For use on immutable objects or with methods returning a copy"""
450 return [
451 getattr(cls, k)
452 for k in (source if source else dir(cls))
453 ]
454
455
# Method-name allow-lists: only methods which do not mutate their receiver
# (or which return a copy) are exposed to guarded calls.
dict_non_mutating_methods = ('copy', 'keys', 'values', 'items')
list_non_mutating_methods = ('copy', 'index', 'count')
# methods shared by `set` and `frozenset` are non-mutating by construction,
# since `frozenset` is immutable
set_non_mutating_methods = set(dir(set)) & set(dir(frozenset))


# these types are not importable by name, so capture them from instances
dict_keys = type({}.keys())
method_descriptor = type(list.copy)

# Callables (constructors and unbound methods) which the selective policy
# permits to be invoked; consumed by `EvaluationPolicy.can_call`, which
# also matches bound methods against these via `unbind_method`.
ALLOWED_CALLS = {
    bytes,
    *_list_methods(bytes),
    dict,
    *_list_methods(dict, dict_non_mutating_methods),
    dict_keys.isdisjoint,
    list,
    *_list_methods(list, list_non_mutating_methods),
    set,
    *_list_methods(set, set_non_mutating_methods),
    frozenset,
    *_list_methods(frozenset),
    range,
    str,
    *_list_methods(str),
    tuple,
    *_list_methods(tuple),
    collections.deque,
    *_list_methods(collections.deque, list_non_mutating_methods),
    collections.defaultdict,
    *_list_methods(collections.defaultdict, dict_non_mutating_methods),
    collections.OrderedDict,
    *_list_methods(collections.OrderedDict, dict_non_mutating_methods),
    collections.UserDict,
    *_list_methods(collections.UserDict, dict_non_mutating_methods),
    collections.UserList,
    *_list_methods(collections.UserList, list_non_mutating_methods),
    collections.UserString,
    *_list_methods(collections.UserString, dir(str)),
    collections.Counter,
    *_list_methods(collections.Counter, dict_non_mutating_methods),
    collections.Counter.elements,
    collections.Counter.most_common
}
498
# Maps the `EvaluationContext.evaluation` literal to a policy instance;
# 'forbidden' and 'dangerous' modes are handled directly in `guarded_eval`
# and therefore have no entry here.
EVALUATION_POLICIES = {
    'minimal': EvaluationPolicy(
        allow_builtins_access=True,
        allow_locals_access=False,
        allow_globals_access=False,
        allow_item_access=False,
        allow_attr_access=False,
        allowed_calls=set(),
        allow_any_calls=False
    ),
    # note: the 'limitted' (sic) spelling matches the EvaluationContext literal
    'limitted': SelectivePolicy(
        # TODO:
        # - should reject binary and unary operations if custom methods would be dispatched
        allowed_getitem=BUILTIN_GETITEM,
        allowed_getitem_external=SUPPORTED_EXTERNAL_GETITEM,
        allowed_getattr={
            *BUILTIN_GETITEM,
            set,
            frozenset,
            object,
            type,  # `type` handles a lot of generic cases, e.g. numbers as in `int.real`.
            dict_keys,
            method_descriptor
        },
        allowed_getattr_external={
            # pandas Series/Frame implements custom `__getattr__`
            ('pandas', 'DataFrame'),
            ('pandas', 'Series')
        },
        allow_builtins_access=True,
        allow_locals_access=True,
        allow_globals_access=True,
        allowed_calls=ALLOWED_CALLS
    ),
    'unsafe': EvaluationPolicy(
        allow_builtins_access=True,
        allow_locals_access=True,
        allow_globals_access=True,
        allow_attr_access=True,
        allow_item_access=True,
        allow_any_calls=True
    )
}
@@ -0,0 +1,286 b''
1 from typing import NamedTuple
2 from IPython.core.guarded_eval import EvaluationContext, GuardRejection, guarded_eval, unbind_method
3 from IPython.testing import decorators as dec
4 import pytest
5
6
def limitted(**kwargs):
    """Build an EvaluationContext in 'limitted' mode with `kwargs` as locals."""
    return EvaluationContext(
        locals_=kwargs,
        globals_={},
        evaluation='limitted'
    )
13
14
def unsafe(**kwargs):
    """Build an EvaluationContext in 'unsafe' mode with `kwargs` as locals."""
    return EvaluationContext(
        locals_=kwargs,
        globals_={},
        evaluation='unsafe'
    )
21
@dec.skip_without('pandas')
def test_pandas_series_iloc():
    # `.iloc` is on the external getitem allow-list
    import pandas as pd
    series = pd.Series([1], index=['a'])
    context = limitted(data=series)
    assert guarded_eval('data.iloc[0]', context) == 1
28
29
@dec.skip_without('pandas')
def test_pandas_series():
    # allowed `__getitem__` still propagates ordinary lookup errors
    import pandas as pd
    context = limitted(data=pd.Series([1], index=['a']))
    assert guarded_eval('data["a"]', context) == 1
    with pytest.raises(KeyError):
        guarded_eval('data["c"]', context)
37
38
@dec.skip_without('pandas')
def test_pandas_bad_series():
    """Subclasses overriding `__getitem__`/`__getattr__` are rejected in
    'limitted' mode but pass through in 'unsafe' mode."""
    import pandas as pd

    class BadItemSeries(pd.Series):
        def __getitem__(self, key):
            return 'CUSTOM_ITEM'

    class BadAttrSeries(pd.Series):
        def __getattr__(self, key):
            return 'CUSTOM_ATTR'

    bad_series = BadItemSeries([1], index=['a'])
    context = limitted(data=bad_series)

    with pytest.raises(GuardRejection):
        guarded_eval('data["a"]', context)
    with pytest.raises(GuardRejection):
        guarded_eval('data["c"]', context)

    # note: here result is a bit unexpected because
    # pandas `__getattr__` calls `__getitem__`;
    # FIXME - special case to handle it?
    assert guarded_eval('data.a', context) == 'CUSTOM_ITEM'

    context = unsafe(data=bad_series)
    assert guarded_eval('data["a"]', context) == 'CUSTOM_ITEM'

    bad_attr_series = BadAttrSeries([1], index=['a'])
    context = limitted(data=bad_attr_series)
    assert guarded_eval('data["a"]', context) == 1
    with pytest.raises(GuardRejection):
        guarded_eval('data.a', context)
71
72
@dec.skip_without('pandas')
def test_pandas_dataframe_loc():
    # `.loc` is on the external getitem allow-list
    import pandas as pd
    from pandas.testing import assert_series_equal
    data = pd.DataFrame([{'a': 1}])
    context = limitted(data=data)
    assert_series_equal(
        guarded_eval('data.loc[:, "a"]', context),
        data['a']
    )
83
84
def test_named_tuple():
    """Named tuples keeping the shared `__getitem__` are allowed;
    overriding `__getitem__` leads to rejection."""

    class GoodNamedTuple(NamedTuple):
        a: str

    class BadNamedTuple(NamedTuple):
        a: str

        def __getitem__(self, key):
            return None

    good = GoodNamedTuple(a='x')
    bad = BadNamedTuple(a='x')

    context = limitted(data=good)
    assert guarded_eval('data[0]', context) == 'x'

    context = limitted(data=bad)
    with pytest.raises(GuardRejection):
        guarded_eval('data[0]', context)
105
106
def test_dict():
    # dict `__getitem__` is allow-listed, including tuple keys and nesting;
    # non-mutating method access (`keys`) is also permitted
    context = limitted(
        data={'a': 1, 'b': {'x': 2}, ('x', 'y'): 3}
    )
    assert guarded_eval('data["a"]', context) == 1
    assert guarded_eval('data["b"]', context) == {'x': 2}
    assert guarded_eval('data["b"]["x"]', context) == 2
    assert guarded_eval('data["x", "y"]', context) == 3

    assert guarded_eval('data.keys', context)
117
118
def test_set():
    # attribute access on sets is permitted
    context = limitted(data={'a', 'b'})
    assert guarded_eval('data.difference', context)
122
123
def test_list():
    # list `__getitem__` and non-mutating method access are permitted
    context = limitted(data=[1, 2, 3])
    assert guarded_eval('data[1]', context) == 2
    assert guarded_eval('data.copy', context)
128
129
def test_dict_literal():
    # dict literals are constructed without any namespace access
    context = limitted()
    assert guarded_eval('{}', context) == {}
    assert guarded_eval('{"a": 1}', context) == {"a": 1}
134
135
def test_list_literal():
    # list literals are constructed without any namespace access
    context = limitted()
    assert guarded_eval('[]', context) == []
    assert guarded_eval('[1, "a"]', context) == [1, "a"]
140
141
def test_set_literal():
    # `set()` is an allowed call; set literals need no call at all
    context = limitted()
    assert guarded_eval('set()', context) == set()
    assert guarded_eval('{"a"}', context) == {"a"}
146
147
def test_if_expression():
    # ternary IfExp is evaluated (unlike `if` statements)
    context = limitted()
    assert guarded_eval('2 if True else 3', context) == 2
    assert guarded_eval('4 if False else 5', context) == 5
152
153
def test_object():
    # plain `object` instances are on the getattr allow-list
    obj = object()
    context = limitted(obj=obj)
    assert guarded_eval('obj.__dir__', context) == obj.__dir__
158
159
@pytest.mark.parametrize(
    "code,expected",
    [
        [
            'int.numerator',
            int.numerator
        ],
        [
            'float.is_integer',
            float.is_integer
        ],
        [
            'complex.real',
            complex.real
        ]
    ]
)
def test_number_attributes(code, expected):
    # number attributes resolve via `type`, which is on the getattr allow-list
    assert guarded_eval(code, limitted()) == expected
179
180
def test_method_descriptor():
    # `method_descriptor` is on the getattr allow-list
    context = limitted()
    assert guarded_eval('list.copy.__name__', context) == 'copy'
184
185
@pytest.mark.parametrize(
    "data,good,bad,expected",
    [
        [
            [1, 2, 3],
            'data.index(2)',
            'data.append(4)',
            1
        ],
        [
            {'a': 1},
            'data.keys().isdisjoint({})',
            'data.update()',
            True
        ]
    ]
)
def test_calls(data, good, bad, expected):
    # non-mutating methods are callable, mutating ones are rejected
    context = limitted(data=data)
    assert guarded_eval(good, context) == expected

    with pytest.raises(GuardRejection):
        guarded_eval(bad, context)
209
210
@pytest.mark.parametrize(
    "code,expected",
    [
        [
            '(1\n+\n1)',
            2
        ],
        [
            'list(range(10))[-1:]',
            [9]
        ],
        [
            'list(range(20))[3:-2:3]',
            [3, 6, 9, 12, 15]
        ]
    ]
)
def test_literals(code, expected):
    # multi-line expressions, slicing and allowed calls compose correctly
    context = limitted()
    assert guarded_eval(code, context) == expected
231
232
def test_subscript():
    # `in_subscript=True` enables bare slice syntax like `:` or `1:2:3`
    context = EvaluationContext(
        locals_={},
        globals_={},
        evaluation='limitted',
        in_subscript=True
    )
    empty_slice = slice(None, None, None)
    assert guarded_eval('', context) == tuple()
    assert guarded_eval(':', context) == empty_slice
    assert guarded_eval('1:2:3', context) == slice(1, 2, 3)
    assert guarded_eval(':, "a"', context) == (empty_slice, "a")
245
246
def test_unbind_method():
    # bound methods resolve back to the class-level function, including
    # on subclasses which override the method
    class X(list):
        def index(self, k):
            return 'CUSTOM'
    x = X()
    assert unbind_method(x.index) is X.index
    assert unbind_method([].index) is list.index
254
255
def test_assumption_instance_attr_do_not_matter():
    """This is semi-specified in Python documentation.

    However, since the specification says 'not guaranteed
    to work' rather than 'is forbidden to work', future
    versions could invalidate these assumptions. This test
    is meant to catch such a change if it ever comes true.
    """
    class T:
        def __getitem__(self, k):
            return 'a'

        def __getattr__(self, k):
            return 'a'

    t = T()
    # instance-level dunders must be ignored; the class-level ones win
    t.__getitem__ = lambda f: 'b'
    t.__getattr__ = lambda f: 'b'
    assert t[1] == 'a'
    # exercise the attribute path too (not a duplicate of the item check)
    assert t.x == 'a'
274
275
def test_assumption_named_tuples_share_getitem():
    """Check assumption on named tuples sharing __getitem__"""
    from typing import NamedTuple

    class First(NamedTuple):
        pass

    class Second(NamedTuple):
        pass

    assert First.__getitem__ == Second.__getitem__
@@ -1,2977 +1,3036 b''
1 """Completion for IPython.
1 """Completion for IPython.
2
2
3 This module started as fork of the rlcompleter module in the Python standard
3 This module started as fork of the rlcompleter module in the Python standard
4 library. The original enhancements made to rlcompleter have been sent
4 library. The original enhancements made to rlcompleter have been sent
5 upstream and were accepted as of Python 2.3,
5 upstream and were accepted as of Python 2.3,
6
6
7 This module now supports a wide variety of completion mechanisms, both available
7 This module now supports a wide variety of completion mechanisms, both available
8 for normal classic Python code, as well as completer for IPython specific
8 for normal classic Python code, as well as completer for IPython specific
9 Syntax like magics.
9 Syntax like magics.
10
10
11 Latex and Unicode completion
11 Latex and Unicode completion
12 ============================
12 ============================
13
13
14 IPython and compatible frontends not only can complete your code, but can help
14 IPython and compatible frontends not only can complete your code, but can help
15 you to input a wide range of characters. In particular we allow you to insert
15 you to input a wide range of characters. In particular we allow you to insert
16 a unicode character using the tab completion mechanism.
16 a unicode character using the tab completion mechanism.
17
17
18 Forward latex/unicode completion
18 Forward latex/unicode completion
19 --------------------------------
19 --------------------------------
20
20
21 Forward completion allows you to easily type a unicode character using its latex
21 Forward completion allows you to easily type a unicode character using its latex
22 name, or unicode long description. To do so type a backslash followed by the
22 name, or unicode long description. To do so type a backslash followed by the
23 relevant name and press tab:
23 relevant name and press tab:
24
24
25
25
26 Using latex completion:
26 Using latex completion:
27
27
28 .. code::
28 .. code::
29
29
30 \\alpha<tab>
30 \\alpha<tab>
31 Ξ±
31 Ξ±
32
32
33 or using unicode completion:
33 or using unicode completion:
34
34
35
35
36 .. code::
36 .. code::
37
37
38 \\GREEK SMALL LETTER ALPHA<tab>
38 \\GREEK SMALL LETTER ALPHA<tab>
39 Ξ±
39 Ξ±
40
40
41
41
42 Only valid Python identifiers will complete. Combining characters (like arrow or
42 Only valid Python identifiers will complete. Combining characters (like arrow or
43 dots) are also available, unlike latex they need to be put after their
43 dots) are also available, unlike latex they need to be put after their
44 counterpart that is to say, ``F\\\\vec<tab>`` is correct, not ``\\\\vec<tab>F``.
44 counterpart that is to say, ``F\\\\vec<tab>`` is correct, not ``\\\\vec<tab>F``.
45
45
46 Some browsers are known to display combining characters incorrectly.
46 Some browsers are known to display combining characters incorrectly.
47
47
48 Backward latex completion
48 Backward latex completion
49 -------------------------
49 -------------------------
50
50
51 It is sometimes challenging to know how to type a character, if you are using
51 It is sometimes challenging to know how to type a character, if you are using
52 IPython, or any compatible frontend you can prepend backslash to the character
52 IPython, or any compatible frontend you can prepend backslash to the character
53 and press ``<tab>`` to expand it to its latex form.
53 and press ``<tab>`` to expand it to its latex form.
54
54
55 .. code::
55 .. code::
56
56
57 \\Ξ±<tab>
57 \\Ξ±<tab>
58 \\alpha
58 \\alpha
59
59
60
60
61 Both forward and backward completions can be deactivated by setting the
61 Both forward and backward completions can be deactivated by setting the
62 ``Completer.backslash_combining_completions`` option to ``False``.
62 ``Completer.backslash_combining_completions`` option to ``False``.
63
63
64
64
65 Experimental
65 Experimental
66 ============
66 ============
67
67
68 Starting with IPython 6.0, this module can make use of the Jedi library to
68 Starting with IPython 6.0, this module can make use of the Jedi library to
69 generate completions both using static analysis of the code, and dynamically
69 generate completions both using static analysis of the code, and dynamically
70 inspecting multiple namespaces. Jedi is an autocompletion and static analysis
70 inspecting multiple namespaces. Jedi is an autocompletion and static analysis
71 for Python. The APIs attached to this new mechanism is unstable and will
71 for Python. The APIs attached to this new mechanism is unstable and will
72 raise unless used in an :any:`provisionalcompleter` context manager.
72 raise unless used in an :any:`provisionalcompleter` context manager.
73
73
74 You will find that the following are experimental:
74 You will find that the following are experimental:
75
75
76 - :any:`provisionalcompleter`
76 - :any:`provisionalcompleter`
77 - :any:`IPCompleter.completions`
77 - :any:`IPCompleter.completions`
78 - :any:`Completion`
78 - :any:`Completion`
79 - :any:`rectify_completions`
79 - :any:`rectify_completions`
80
80
81 .. note::
81 .. note::
82
82
83 better name for :any:`rectify_completions` ?
83 better name for :any:`rectify_completions` ?
84
84
85 We welcome any feedback on these new API, and we also encourage you to try this
85 We welcome any feedback on these new API, and we also encourage you to try this
86 module in debug mode (start IPython with ``--Completer.debug=True``) in order
86 module in debug mode (start IPython with ``--Completer.debug=True``) in order
87 to have extra logging information if :any:`jedi` is crashing, or if current
87 to have extra logging information if :any:`jedi` is crashing, or if current
88 IPython completer pending deprecations are returning results not yet handled
88 IPython completer pending deprecations are returning results not yet handled
89 by :any:`jedi`
89 by :any:`jedi`
90
90
91 Using Jedi for tab completion allow snippets like the following to work without
91 Using Jedi for tab completion allow snippets like the following to work without
92 having to execute any code:
92 having to execute any code:
93
93
94 >>> myvar = ['hello', 42]
94 >>> myvar = ['hello', 42]
95 ... myvar[1].bi<tab>
95 ... myvar[1].bi<tab>
96
96
97 Tab completion will be able to infer that ``myvar[1]`` is a real number without
97 Tab completion will be able to infer that ``myvar[1]`` is a real number without
98 executing any code unlike the previously available ``IPCompleter.greedy``
98 executing any code unlike the previously available ``IPCompleter.greedy``
99 option.
99 option.
100
100
101 Be sure to update :any:`jedi` to the latest stable version or to try the
101 Be sure to update :any:`jedi` to the latest stable version or to try the
102 current development version to get better completions.
102 current development version to get better completions.
103
103
104 Matchers
104 Matchers
105 ========
105 ========
106
106
107 All completions routines are implemented using unified *Matchers* API.
107 All completions routines are implemented using unified *Matchers* API.
108 The matchers API is provisional and subject to change without notice.
108 The matchers API is provisional and subject to change without notice.
109
109
110 The built-in matchers include:
110 The built-in matchers include:
111
111
112 - :any:`IPCompleter.dict_key_matcher`: dictionary key completions,
112 - :any:`IPCompleter.dict_key_matcher`: dictionary key completions,
113 - :any:`IPCompleter.magic_matcher`: completions for magics,
113 - :any:`IPCompleter.magic_matcher`: completions for magics,
114 - :any:`IPCompleter.unicode_name_matcher`,
114 - :any:`IPCompleter.unicode_name_matcher`,
115 :any:`IPCompleter.fwd_unicode_matcher`
115 :any:`IPCompleter.fwd_unicode_matcher`
116 and :any:`IPCompleter.latex_name_matcher`: see `Forward latex/unicode completion`_,
116 and :any:`IPCompleter.latex_name_matcher`: see `Forward latex/unicode completion`_,
117 - :any:`back_unicode_name_matcher` and :any:`back_latex_name_matcher`: see `Backward latex completion`_,
117 - :any:`back_unicode_name_matcher` and :any:`back_latex_name_matcher`: see `Backward latex completion`_,
118 - :any:`IPCompleter.file_matcher`: paths to files and directories,
118 - :any:`IPCompleter.file_matcher`: paths to files and directories,
119 - :any:`IPCompleter.python_func_kw_matcher` - function keywords,
119 - :any:`IPCompleter.python_func_kw_matcher` - function keywords,
120 - :any:`IPCompleter.python_matches` - globals and attributes (v1 API),
120 - :any:`IPCompleter.python_matches` - globals and attributes (v1 API),
121 - ``IPCompleter.jedi_matcher`` - static analysis with Jedi,
121 - ``IPCompleter.jedi_matcher`` - static analysis with Jedi,
122 - :any:`IPCompleter.custom_completer_matcher` - pluggable completer with a default
122 - :any:`IPCompleter.custom_completer_matcher` - pluggable completer with a default
123 implementation in :any:`InteractiveShell` which uses IPython hooks system
123 implementation in :any:`InteractiveShell` which uses IPython hooks system
124 (`complete_command`) with string dispatch (including regular expressions).
124 (`complete_command`) with string dispatch (including regular expressions).
125 Unlike other matchers, ``custom_completer_matcher`` will not suppress
125 Unlike other matchers, ``custom_completer_matcher`` will not suppress
126 Jedi results to match behaviour in earlier IPython versions.
126 Jedi results to match behaviour in earlier IPython versions.
127
127
128 Custom matchers can be added by appending to ``IPCompleter.custom_matchers`` list.
128 Custom matchers can be added by appending to ``IPCompleter.custom_matchers`` list.
129
129
130 Matcher API
130 Matcher API
131 -----------
131 -----------
132
132
133 Simplifying some details, the ``Matcher`` interface can be described as
133 Simplifying some details, the ``Matcher`` interface can be described as
134
134
135 .. code-block::
135 .. code-block::
136
136
137 MatcherAPIv1 = Callable[[str], list[str]]
137 MatcherAPIv1 = Callable[[str], list[str]]
138 MatcherAPIv2 = Callable[[CompletionContext], SimpleMatcherResult]
138 MatcherAPIv2 = Callable[[CompletionContext], SimpleMatcherResult]
139
139
140 Matcher = MatcherAPIv1 | MatcherAPIv2
140 Matcher = MatcherAPIv1 | MatcherAPIv2
141
141
142 The ``MatcherAPIv1`` reflects the matcher API as available prior to IPython 8.6.0
142 The ``MatcherAPIv1`` reflects the matcher API as available prior to IPython 8.6.0
143 and remains supported as a simplest way for generating completions. This is also
143 and remains supported as a simplest way for generating completions. This is also
144 currently the only API supported by the IPython hooks system `complete_command`.
144 currently the only API supported by the IPython hooks system `complete_command`.
145
145
146 To distinguish between matcher versions ``matcher_api_version`` attribute is used.
146 To distinguish between matcher versions ``matcher_api_version`` attribute is used.
147 More precisely, the API allows to omit ``matcher_api_version`` for v1 Matchers,
147 More precisely, the API allows to omit ``matcher_api_version`` for v1 Matchers,
148 and requires a literal ``2`` for v2 Matchers.
148 and requires a literal ``2`` for v2 Matchers.
149
149
150 Once the API stabilises future versions may relax the requirement for specifying
150 Once the API stabilises future versions may relax the requirement for specifying
151 ``matcher_api_version`` by switching to :any:`functools.singledispatch`, therefore
151 ``matcher_api_version`` by switching to :any:`functools.singledispatch`, therefore
152 please do not rely on the presence of ``matcher_api_version`` for any purposes.
152 please do not rely on the presence of ``matcher_api_version`` for any purposes.
153
153
154 Suppression of competing matchers
154 Suppression of competing matchers
155 ---------------------------------
155 ---------------------------------
156
156
157 By default results from all matchers are combined, in the order determined by
157 By default results from all matchers are combined, in the order determined by
158 their priority. Matchers can request to suppress results from subsequent
158 their priority. Matchers can request to suppress results from subsequent
159 matchers by setting ``suppress`` to ``True`` in the ``MatcherResult``.
159 matchers by setting ``suppress`` to ``True`` in the ``MatcherResult``.
160
160
161 When multiple matchers simultaneously request suppression, the results from
161 When multiple matchers simultaneously request suppression, the results from
162 the matcher with higher priority will be returned.
162 the matcher with higher priority will be returned.
163
163
164 Sometimes it is desirable to suppress most but not all other matchers;
164 Sometimes it is desirable to suppress most but not all other matchers;
165 this can be achieved by adding a list of identifiers of matchers which
165 this can be achieved by adding a list of identifiers of matchers which
166 should not be suppressed to ``MatcherResult`` under ``do_not_suppress`` key.
166 should not be suppressed to ``MatcherResult`` under ``do_not_suppress`` key.
167
167
168 The suppression behaviour is user-configurable via
168 The suppression behaviour is user-configurable via
169 :any:`IPCompleter.suppress_competing_matchers`.
169 :any:`IPCompleter.suppress_competing_matchers`.
170 """
170 """
171
171
172
172
173 # Copyright (c) IPython Development Team.
173 # Copyright (c) IPython Development Team.
174 # Distributed under the terms of the Modified BSD License.
174 # Distributed under the terms of the Modified BSD License.
175 #
175 #
176 # Some of this code originated from rlcompleter in the Python standard library
176 # Some of this code originated from rlcompleter in the Python standard library
177 # Copyright (C) 2001 Python Software Foundation, www.python.org
177 # Copyright (C) 2001 Python Software Foundation, www.python.org
178
178
179 from __future__ import annotations
179 from __future__ import annotations
180 import builtins as builtin_mod
180 import builtins as builtin_mod
181 import glob
181 import glob
182 import inspect
182 import inspect
183 import itertools
183 import itertools
184 import keyword
184 import keyword
185 import os
185 import os
186 import re
186 import re
187 import string
187 import string
188 import sys
188 import sys
189 import time
189 import time
190 import unicodedata
190 import unicodedata
191 import uuid
191 import uuid
192 import warnings
192 import warnings
193 from ast import literal_eval
193 from contextlib import contextmanager
194 from contextlib import contextmanager
194 from dataclasses import dataclass
195 from dataclasses import dataclass
195 from functools import cached_property, partial
196 from functools import cached_property, partial
196 from importlib import import_module
197 from importlib import import_module
197 from types import SimpleNamespace
198 from types import SimpleNamespace
198 from typing import (
199 from typing import (
199 Iterable,
200 Iterable,
200 Iterator,
201 Iterator,
201 List,
202 List,
202 Tuple,
203 Tuple,
203 Union,
204 Union,
204 Any,
205 Any,
205 Sequence,
206 Sequence,
206 Dict,
207 Dict,
207 NamedTuple,
208 NamedTuple,
208 Pattern,
209 Pattern,
209 Optional,
210 Optional,
210 TYPE_CHECKING,
211 TYPE_CHECKING,
211 Set,
212 Set,
212 Literal,
213 Literal,
213 )
214 )
214
215
216 from IPython.core.guarded_eval import guarded_eval, EvaluationContext
215 from IPython.core.error import TryNext
217 from IPython.core.error import TryNext
216 from IPython.core.inputtransformer2 import ESC_MAGIC
218 from IPython.core.inputtransformer2 import ESC_MAGIC
217 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
219 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
218 from IPython.core.oinspect import InspectColors
220 from IPython.core.oinspect import InspectColors
219 from IPython.testing.skipdoctest import skip_doctest
221 from IPython.testing.skipdoctest import skip_doctest
220 from IPython.utils import generics
222 from IPython.utils import generics
221 from IPython.utils.decorators import sphinx_options
223 from IPython.utils.decorators import sphinx_options
222 from IPython.utils.dir2 import dir2, get_real_method
224 from IPython.utils.dir2 import dir2, get_real_method
223 from IPython.utils.docs import GENERATING_DOCUMENTATION
225 from IPython.utils.docs import GENERATING_DOCUMENTATION
224 from IPython.utils.path import ensure_dir_exists
226 from IPython.utils.path import ensure_dir_exists
225 from IPython.utils.process import arg_split
227 from IPython.utils.process import arg_split
226 from traitlets import (
228 from traitlets import (
227 Bool,
229 Bool,
228 Enum,
230 Enum,
229 Int,
231 Int,
230 List as ListTrait,
232 List as ListTrait,
231 Unicode,
233 Unicode,
232 Dict as DictTrait,
234 Dict as DictTrait,
233 Union as UnionTrait,
235 Union as UnionTrait,
234 default,
236 default,
235 observe,
237 observe,
236 )
238 )
237 from traitlets.config.configurable import Configurable
239 from traitlets.config.configurable import Configurable
238
240
239 import __main__
241 import __main__
240
242
241 # skip module docstests
243 # skip module docstests
242 __skip_doctest__ = True
244 __skip_doctest__ = True
243
245
244
246
245 try:
247 try:
246 import jedi
248 import jedi
247 jedi.settings.case_insensitive_completion = False
249 jedi.settings.case_insensitive_completion = False
248 import jedi.api.helpers
250 import jedi.api.helpers
249 import jedi.api.classes
251 import jedi.api.classes
250 JEDI_INSTALLED = True
252 JEDI_INSTALLED = True
251 except ImportError:
253 except ImportError:
252 JEDI_INSTALLED = False
254 JEDI_INSTALLED = False
253
255
254
256
255 if TYPE_CHECKING or GENERATING_DOCUMENTATION:
257 if TYPE_CHECKING or GENERATING_DOCUMENTATION:
256 from typing import cast
258 from typing import cast
257 from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias
259 from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias
258 else:
260 else:
259
261
260 def cast(obj, type_):
262 def cast(obj, type_):
261 """Workaround for `TypeError: MatcherAPIv2() takes no arguments`"""
263 """Workaround for `TypeError: MatcherAPIv2() takes no arguments`"""
262 return obj
264 return obj
263
265
264 # do not require on runtime
266 # do not require on runtime
265 NotRequired = Tuple # requires Python >=3.11
267 NotRequired = Tuple # requires Python >=3.11
266 TypedDict = Dict # by extension of `NotRequired` requires 3.11 too
268 TypedDict = Dict # by extension of `NotRequired` requires 3.11 too
267 Protocol = object # requires Python >=3.8
269 Protocol = object # requires Python >=3.8
268 TypeAlias = Any # requires Python >=3.10
270 TypeAlias = Any # requires Python >=3.10
269 if GENERATING_DOCUMENTATION:
271 if GENERATING_DOCUMENTATION:
270 from typing import TypedDict
272 from typing import TypedDict
271
273
272 # -----------------------------------------------------------------------------
274 # -----------------------------------------------------------------------------
273 # Globals
275 # Globals
274 #-----------------------------------------------------------------------------
276 #-----------------------------------------------------------------------------
275
277
276 # ranges where we have most of the valid unicode names. We could be more finer
278 # ranges where we have most of the valid unicode names. We could be more finer
277 # grained but is it worth it for performance While unicode have character in the
279 # grained but is it worth it for performance While unicode have character in the
278 # range 0, 0x110000, we seem to have name for about 10% of those. (131808 as I
280 # range 0, 0x110000, we seem to have name for about 10% of those. (131808 as I
279 # write this). With below range we cover them all, with a density of ~67%
281 # write this). With below range we cover them all, with a density of ~67%
280 # biggest next gap we consider only adds up about 1% density and there are 600
282 # biggest next gap we consider only adds up about 1% density and there are 600
281 # gaps that would need hard coding.
283 # gaps that would need hard coding.
282 _UNICODE_RANGES = [(32, 0x3134b), (0xe0001, 0xe01f0)]
284 _UNICODE_RANGES = [(32, 0x3134b), (0xe0001, 0xe01f0)]
283
285
284 # Public API
286 # Public API
285 __all__ = ["Completer", "IPCompleter"]
287 __all__ = ["Completer", "IPCompleter"]
286
288
287 if sys.platform == 'win32':
289 if sys.platform == 'win32':
288 PROTECTABLES = ' '
290 PROTECTABLES = ' '
289 else:
291 else:
290 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
292 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
291
293
292 # Protect against returning an enormous number of completions which the frontend
294 # Protect against returning an enormous number of completions which the frontend
293 # may have trouble processing.
295 # may have trouble processing.
294 MATCHES_LIMIT = 500
296 MATCHES_LIMIT = 500
295
297
296 # Completion type reported when no type can be inferred.
298 # Completion type reported when no type can be inferred.
297 _UNKNOWN_TYPE = "<unknown>"
299 _UNKNOWN_TYPE = "<unknown>"
298
300
301 # sentinel value to signal lack of a match
302 not_found = object()
303
class ProvisionalCompleterWarning(FutureWarning):
    """
    Warning raised by experimental features in this module.

    Wrap code in the :any:`provisionalcompleter` context manager if you
    are certain you want to use an unstable feature.
    """
307
312
308 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
313 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
309
314
310
315
@skip_doctest
@contextmanager
def provisionalcompleter(action='ignore'):
    """
    This context manager has to be used in any place where unstable completer
    behavior and API may be called.

    >>> with provisionalcompleter():
    ...     completer.do_experimental_things() # works

    >>> completer.do_experimental_things() # raises.

    .. note::

        Unstable

        By using this context manager you agree that the API in use may change
        without warning, and that you won't complain if they do so.

        You also understand that, if the API is not to your liking, you should report
        a bug to explain your use case upstream.

        We'll be happy to get your feedback, feature requests, and improvements on
        any of the unstable APIs!
    """
    # The module installs an 'error' filter for ProvisionalCompleterWarning at
    # import time; temporarily override it with `action` (default: 'ignore')
    # inside catch_warnings() so the previous filter state is restored on exit.
    with warnings.catch_warnings():
        warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
        yield
339
344
340
345
def has_open_quotes(s):
    """Return whether a string has open quotes.

    This simply counts whether the number of quote characters of either type in
    the string is odd.

    Returns
    -------
    If there is an open quote, the quote character is returned. Else, return
    False.
    """
    # Double quotes are checked before single quotes, so complex cases with
    # nested quotes resolve in favour of ".  An odd count means an open quote.
    for quote in ('"', "'"):
        if s.count(quote) % 2 == 1:
            return quote
    return False
360
365
361
366
def protect_filename(s, protectables=PROTECTABLES):
    """Escape a string to protect certain characters."""
    needs_protection = set(s) & set(protectables)
    if not needs_protection:
        return s
    if sys.platform == "win32":
        # Windows: wrap the whole path in double quotes.
        return '"' + s + '"'
    # POSIX: backslash-escape each protectable character individually.
    escaped = ("\\" + c if c in protectables else c for c in s)
    return "".join(escaped)
371
376
372
377
def expand_user(path: str) -> Tuple[str, bool, str]:
    """Expand ``~``-style usernames in strings.

    This is similar to :func:`os.path.expanduser`, but it computes and returns
    extra information that will be useful if the input was being used in
    computing completions, and you wish to return the completions with the
    original '~' instead of its expanded value.

    Parameters
    ----------
    path : str
        String to be expanded. If no ~ is present, the output is the same as the
        input.

    Returns
    -------
    newpath : str
        Result of ~ expansion in the input path.
    tilde_expand : bool
        Whether any expansion was performed or not.
    tilde_val : str
        The value that ~ was replaced with.
    """
    # Fast path: nothing to expand.
    if not path.startswith('~'):
        return path, False, ''

    expanded = os.path.expanduser(path)
    # Number of characters after the leading '~'; slicing them off the
    # expanded path leaves exactly the text that replaced the '~'.
    tail_len = len(path) - 1
    tilde_val = expanded[:-tail_len] if tail_len else expanded
    return expanded, True, tilde_val
411
416
412
417
def compress_user(path: str, tilde_expand: bool, tilde_val: str) -> str:
    """Does the opposite of expand_user, with its outputs."""
    # Nothing was expanded, so there is nothing to compress back.
    if not tilde_expand:
        return path
    # Substitute the expanded home prefix back with '~'.
    return path.replace(tilde_val, '~')
420
425
421
426
def completions_sorting_key(word):
    """key for sorting completions

    This does several things:

    - Demote any completions starting with underscores to the end
    - Insert any %magic and %%cellmagic completions in the alphabetical order
      by their name
    """
    underscore_prio = 0
    magic_prio = 0

    # Dunder names sort last, single-underscore names just before them.
    if word.startswith('__'):
        underscore_prio = 2
    elif word.startswith('_'):
        underscore_prio = 1

    # Keyword-argument completions (trailing '=') sort first.
    if word.endswith('='):
        underscore_prio = -1

    if word.startswith('%%'):
        # If there's another % in there, this is something else, so leave it alone
        if "%" not in word[2:]:
            word = word[2:]
            magic_prio = 2
    elif word.startswith('%'):
        if "%" not in word[1:]:
            word = word[1:]
            magic_prio = 1

    return underscore_prio, word, magic_prio
452
457
453
458
class _FakeJediCompletion:
    """
    Stand-in completion object used to tell the UI that Jedi has crashed and
    that a bug report is welcome. Only produced when :any:`IPCompleter.debug`
    is set to true.

    Added in IPython 6.0 so should likely be removed for 7.0
    """

    def __init__(self, name):
        # Mirror the attributes a real Jedi completion exposes, all derived
        # from the single crash-report name.
        self.name = name
        self.complete = name
        self.name_with_symbols = name
        self.type = 'crashed'
        self.signature = ''
        self._origin = 'fake'

    def __repr__(self):
        return '<Fake completion object jedi has crashed>'
474
479
475
480
476 _JediCompletionLike = Union[jedi.api.Completion, _FakeJediCompletion]
481 _JediCompletionLike = Union[jedi.api.Completion, _FakeJediCompletion]
477
482
478
483
class Completion:
    """
    Completion object used and returned by IPython completers.

    .. warning::

        Unstable

        This function is unstable, API may change without warning.
        It will also raise unless use in proper context manager.

    This act as a middle ground :any:`Completion` object between the
    :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
    object. While Jedi need a lot of information about evaluator and how the
    code should be ran/inspected, PromptToolkit (and other frontend) mostly
    need user facing information.

    - Which range should be replaced by what.
    - Some metadata (like completion type), or meta information to display to
      the user.

    For debugging purpose we can also store the origin of the completion (``jedi``,
    ``IPython.python_matches``, ``IPython.magics_matches``...).
    """

    __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']

    def __init__(
        self,
        start: int,
        end: int,
        text: str,
        *,
        # FIX: was ``type: str = None`` — implicit Optional (deprecated by PEP 484).
        type: Optional[str] = None,
        _origin: str = '',
        signature: str = '',
    ) -> None:
        warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
                      "It may change without warnings. "
                      "Use in corresponding context manager.",
                      category=ProvisionalCompleterWarning, stacklevel=2)

        self.start = start
        self.end = end
        self.text = text
        self.type = type
        self.signature = signature
        self._origin = _origin

    def __repr__(self):
        return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
            (self.start, self.end, self.text, self.type or '?', self.signature or '?')

    # FIX: the original annotated the return as ``Bool`` which resolves to the
    # traitlets ``Bool`` trait imported at module level, not the builtin.
    def __eq__(self, other) -> bool:
        """
        Equality and hash do not hash the type (as some completer may not be
        able to infer the type), but are use to (partially) de-duplicate
        completion.

        Completely de-duplicating completion is a bit tricker that just
        comparing as it depends on surrounding text, which Completions are not
        aware of.
        """
        return self.start == other.start and \
            self.end == other.end and \
            self.text == other.text

    def __hash__(self):
        return hash((self.start, self.end, self.text))
539
544
540
545
class SimpleCompletion:
    """Completion item to be included in the dictionary returned by new-style Matcher (API v2).

    .. warning::

        Provisional

        This class is used to describe the currently supported attributes of
        simple completion items, and any additional implementation details
        should not be relied on. Additional attributes may be included in
        future versions, and meaning of text disambiguated from the current
        dual meaning of "text to insert" and "text to used as a label".
    """

    __slots__ = ["text", "type"]

    # FIX: was ``type: str = None`` — implicit Optional (deprecated by PEP 484).
    def __init__(self, text: str, *, type: Optional[str] = None):
        self.text = text
        self.type = type

    def __repr__(self):
        return f"<SimpleCompletion text={self.text!r} type={self.type!r}>"
563
568
564
569
class _MatcherResultBase(TypedDict):
    """Definition of dictionary to be returned by new-style Matcher (API v2).

    NOTE: at runtime without ``typing_extensions`` available, ``TypedDict`` and
    ``NotRequired`` are aliased to plain ``Dict``/``Tuple`` (see the fallback
    near the top of this module), so these annotations matter only for static
    analysis and documentation.
    """

    #: Suffix of the provided ``CompletionContext.token``, if not given defaults to full token.
    matched_fragment: NotRequired[str]

    #: Whether to suppress results from all other matchers (True), some
    #: matchers (set of identifiers) or none (False); default is False.
    suppress: NotRequired[Union[bool, Set[str]]]

    #: Identifiers of matchers which should NOT be suppressed when this matcher
    #: requests to suppress all other matchers; defaults to an empty set.
    do_not_suppress: NotRequired[Set[str]]

    #: Are completions already ordered and should be left as-is? default is False.
    ordered: NotRequired[bool]
581
586
582
587
@sphinx_options(show_inherited_members=True, exclude_inherited_from=["dict"])
class SimpleMatcherResult(_MatcherResultBase, TypedDict):
    """Result of new-style completion matcher.

    Extends :any:`_MatcherResultBase` with the mandatory ``completions`` key.
    """

    # note: TypedDict is added again to the inheritance chain
    # in order to get __orig_bases__ for documentation

    #: List of candidate completions
    completions: Sequence[SimpleCompletion]
592
597
593
598
class _JediMatcherResult(_MatcherResultBase):
    """Matching result returned by Jedi (will be processed differently).

    Unlike :any:`SimpleMatcherResult`, ``completions`` holds raw Jedi objects
    (or :any:`_FakeJediCompletion` stand-ins) and is typed as a possibly lazy
    ``Iterable`` rather than a ``Sequence``.
    """

    #: list of candidate completions
    completions: Iterable[_JediCompletionLike]
599
604
600
605
@dataclass
class CompletionContext:
    """Completion context provided as an argument to matchers in the Matcher API v2."""

    # rationale: many legacy matchers relied on completer state (`self.text_until_cursor`)
    # which was not explicitly visible as an argument of the matcher, making any refactor
    # prone to errors; by explicitly passing `cursor_position` we can decouple the matchers
    # from the completer, and make substituting them in sub-classes easier.

    #: Relevant fragment of code directly preceding the cursor.
    #: The extraction of token is implemented via splitter heuristic
    #: (following readline behaviour for legacy reasons), which is user configurable
    #: (by switching the greedy mode).
    token: str

    #: The full available content of the editor or buffer
    full_text: str

    #: Cursor position in the line (the same for ``full_text`` and ``text``).
    cursor_position: int

    #: Cursor line in ``full_text``.
    cursor_line: int

    #: The maximum number of completions that will be used downstream.
    #: Matchers can use this information to abort early.
    #: The built-in Jedi matcher is currently excepted from this limit.
    # If not given, return all possible completions.
    limit: Optional[int]

    @cached_property
    def text_until_cursor(self) -> str:
        """Text of the cursor's line up to (excluding) the cursor column."""
        return self.line_with_cursor[: self.cursor_position]

    @cached_property
    def line_with_cursor(self) -> str:
        """Full text of the line the cursor is on.

        Cached on first access (via ``cached_property``); assumes the context
        is not mutated afterwards.
        """
        return self.full_text.split("\n")[self.cursor_line]
638
643
639
644
640 #: Matcher results for API v2.
645 #: Matcher results for API v2.
641 MatcherResult = Union[SimpleMatcherResult, _JediMatcherResult]
646 MatcherResult = Union[SimpleMatcherResult, _JediMatcherResult]
642
647
643
648
644 class _MatcherAPIv1Base(Protocol):
649 class _MatcherAPIv1Base(Protocol):
645 def __call__(self, text: str) -> list[str]:
650 def __call__(self, text: str) -> list[str]:
646 """Call signature."""
651 """Call signature."""
647
652
648
653
649 class _MatcherAPIv1Total(_MatcherAPIv1Base, Protocol):
654 class _MatcherAPIv1Total(_MatcherAPIv1Base, Protocol):
650 #: API version
655 #: API version
651 matcher_api_version: Optional[Literal[1]]
656 matcher_api_version: Optional[Literal[1]]
652
657
653 def __call__(self, text: str) -> list[str]:
658 def __call__(self, text: str) -> list[str]:
654 """Call signature."""
659 """Call signature."""
655
660
656
661
657 #: Protocol describing Matcher API v1.
662 #: Protocol describing Matcher API v1.
658 MatcherAPIv1: TypeAlias = Union[_MatcherAPIv1Base, _MatcherAPIv1Total]
663 MatcherAPIv1: TypeAlias = Union[_MatcherAPIv1Base, _MatcherAPIv1Total]
659
664
660
665
661 class MatcherAPIv2(Protocol):
666 class MatcherAPIv2(Protocol):
662 """Protocol describing Matcher API v2."""
667 """Protocol describing Matcher API v2."""
663
668
664 #: API version
669 #: API version
665 matcher_api_version: Literal[2] = 2
670 matcher_api_version: Literal[2] = 2
666
671
667 def __call__(self, context: CompletionContext) -> MatcherResult:
672 def __call__(self, context: CompletionContext) -> MatcherResult:
668 """Call signature."""
673 """Call signature."""
669
674
670
675
671 Matcher: TypeAlias = Union[MatcherAPIv1, MatcherAPIv2]
676 Matcher: TypeAlias = Union[MatcherAPIv1, MatcherAPIv2]
672
677
673
678
674 def has_any_completions(result: MatcherResult) -> bool:
679 def has_any_completions(result: MatcherResult) -> bool:
675 """Check if any result includes any completions."""
680 """Check if any result includes any completions."""
676 if hasattr(result["completions"], "__len__"):
681 if hasattr(result["completions"], "__len__"):
677 return len(result["completions"]) != 0
682 return len(result["completions"]) != 0
678 try:
683 try:
679 old_iterator = result["completions"]
684 old_iterator = result["completions"]
680 first = next(old_iterator)
685 first = next(old_iterator)
681 result["completions"] = itertools.chain([first], old_iterator)
686 result["completions"] = itertools.chain([first], old_iterator)
682 return True
687 return True
683 except StopIteration:
688 except StopIteration:
684 return False
689 return False
685
690
686
691
687 def completion_matcher(
692 def completion_matcher(
688 *, priority: float = None, identifier: str = None, api_version: int = 1
693 *, priority: float = None, identifier: str = None, api_version: int = 1
689 ):
694 ):
690 """Adds attributes describing the matcher.
695 """Adds attributes describing the matcher.
691
696
692 Parameters
697 Parameters
693 ----------
698 ----------
694 priority : Optional[float]
699 priority : Optional[float]
695 The priority of the matcher, determines the order of execution of matchers.
700 The priority of the matcher, determines the order of execution of matchers.
696 Higher priority means that the matcher will be executed first. Defaults to 0.
701 Higher priority means that the matcher will be executed first. Defaults to 0.
697 identifier : Optional[str]
702 identifier : Optional[str]
698 identifier of the matcher allowing users to modify the behaviour via traitlets,
703 identifier of the matcher allowing users to modify the behaviour via traitlets,
699 and also used to for debugging (will be passed as ``origin`` with the completions).
704 and also used to for debugging (will be passed as ``origin`` with the completions).
700
705
701 Defaults to matcher function's ``__qualname__`` (for example,
706 Defaults to matcher function's ``__qualname__`` (for example,
702 ``IPCompleter.file_matcher`` for the built-in matched defined
707 ``IPCompleter.file_matcher`` for the built-in matched defined
703 as a ``file_matcher`` method of the ``IPCompleter`` class).
708 as a ``file_matcher`` method of the ``IPCompleter`` class).
704 api_version: Optional[int]
709 api_version: Optional[int]
705 version of the Matcher API used by this matcher.
710 version of the Matcher API used by this matcher.
706 Currently supported values are 1 and 2.
711 Currently supported values are 1 and 2.
707 Defaults to 1.
712 Defaults to 1.
708 """
713 """
709
714
710 def wrapper(func: Matcher):
715 def wrapper(func: Matcher):
711 func.matcher_priority = priority or 0
716 func.matcher_priority = priority or 0
712 func.matcher_identifier = identifier or func.__qualname__
717 func.matcher_identifier = identifier or func.__qualname__
713 func.matcher_api_version = api_version
718 func.matcher_api_version = api_version
714 if TYPE_CHECKING:
719 if TYPE_CHECKING:
715 if api_version == 1:
720 if api_version == 1:
716 func = cast(func, MatcherAPIv1)
721 func = cast(func, MatcherAPIv1)
717 elif api_version == 2:
722 elif api_version == 2:
718 func = cast(func, MatcherAPIv2)
723 func = cast(func, MatcherAPIv2)
719 return func
724 return func
720
725
721 return wrapper
726 return wrapper
722
727
723
728
724 def _get_matcher_priority(matcher: Matcher):
729 def _get_matcher_priority(matcher: Matcher):
725 return getattr(matcher, "matcher_priority", 0)
730 return getattr(matcher, "matcher_priority", 0)
726
731
727
732
728 def _get_matcher_id(matcher: Matcher):
733 def _get_matcher_id(matcher: Matcher):
729 return getattr(matcher, "matcher_identifier", matcher.__qualname__)
734 return getattr(matcher, "matcher_identifier", matcher.__qualname__)
730
735
731
736
732 def _get_matcher_api_version(matcher):
737 def _get_matcher_api_version(matcher):
733 return getattr(matcher, "matcher_api_version", 1)
738 return getattr(matcher, "matcher_api_version", 1)
734
739
735
740
736 context_matcher = partial(completion_matcher, api_version=2)
741 context_matcher = partial(completion_matcher, api_version=2)
737
742
738
743
739 _IC = Iterable[Completion]
744 _IC = Iterable[Completion]
740
745
741
746
742 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
747 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
743 """
748 """
744 Deduplicate a set of completions.
749 Deduplicate a set of completions.
745
750
746 .. warning::
751 .. warning::
747
752
748 Unstable
753 Unstable
749
754
750 This function is unstable, API may change without warning.
755 This function is unstable, API may change without warning.
751
756
752 Parameters
757 Parameters
753 ----------
758 ----------
754 text : str
759 text : str
755 text that should be completed.
760 text that should be completed.
756 completions : Iterator[Completion]
761 completions : Iterator[Completion]
757 iterator over the completions to deduplicate
762 iterator over the completions to deduplicate
758
763
759 Yields
764 Yields
760 ------
765 ------
761 `Completions` objects
766 `Completions` objects
762 Completions coming from multiple sources, may be different but end up having
767 Completions coming from multiple sources, may be different but end up having
763 the same effect when applied to ``text``. If this is the case, this will
768 the same effect when applied to ``text``. If this is the case, this will
764 consider completions as equal and only emit the first encountered.
769 consider completions as equal and only emit the first encountered.
765 Not folded in `completions()` yet for debugging purpose, and to detect when
770 Not folded in `completions()` yet for debugging purpose, and to detect when
766 the IPython completer does return things that Jedi does not, but should be
771 the IPython completer does return things that Jedi does not, but should be
767 at some point.
772 at some point.
768 """
773 """
769 completions = list(completions)
774 completions = list(completions)
770 if not completions:
775 if not completions:
771 return
776 return
772
777
773 new_start = min(c.start for c in completions)
778 new_start = min(c.start for c in completions)
774 new_end = max(c.end for c in completions)
779 new_end = max(c.end for c in completions)
775
780
776 seen = set()
781 seen = set()
777 for c in completions:
782 for c in completions:
778 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
783 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
779 if new_text not in seen:
784 if new_text not in seen:
780 yield c
785 yield c
781 seen.add(new_text)
786 seen.add(new_text)
782
787
783
788
784 def rectify_completions(text: str, completions: _IC, *, _debug: bool = False) -> _IC:
789 def rectify_completions(text: str, completions: _IC, *, _debug: bool = False) -> _IC:
785 """
790 """
786 Rectify a set of completions to all have the same ``start`` and ``end``
791 Rectify a set of completions to all have the same ``start`` and ``end``
787
792
788 .. warning::
793 .. warning::
789
794
790 Unstable
795 Unstable
791
796
792 This function is unstable, API may change without warning.
797 This function is unstable, API may change without warning.
793 It will also raise unless use in proper context manager.
798 It will also raise unless use in proper context manager.
794
799
795 Parameters
800 Parameters
796 ----------
801 ----------
797 text : str
802 text : str
798 text that should be completed.
803 text that should be completed.
799 completions : Iterator[Completion]
804 completions : Iterator[Completion]
800 iterator over the completions to rectify
805 iterator over the completions to rectify
801 _debug : bool
806 _debug : bool
802 Log failed completion
807 Log failed completion
803
808
804 Notes
809 Notes
805 -----
810 -----
806 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
811 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
807 the Jupyter Protocol requires them to behave like so. This will readjust
812 the Jupyter Protocol requires them to behave like so. This will readjust
808 the completion to have the same ``start`` and ``end`` by padding both
813 the completion to have the same ``start`` and ``end`` by padding both
809 extremities with surrounding text.
814 extremities with surrounding text.
810
815
811 During stabilisation should support a ``_debug`` option to log which
816 During stabilisation should support a ``_debug`` option to log which
812 completion are return by the IPython completer and not found in Jedi in
817 completion are return by the IPython completer and not found in Jedi in
813 order to make upstream bug report.
818 order to make upstream bug report.
814 """
819 """
815 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
820 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
816 "It may change without warnings. "
821 "It may change without warnings. "
817 "Use in corresponding context manager.",
822 "Use in corresponding context manager.",
818 category=ProvisionalCompleterWarning, stacklevel=2)
823 category=ProvisionalCompleterWarning, stacklevel=2)
819
824
820 completions = list(completions)
825 completions = list(completions)
821 if not completions:
826 if not completions:
822 return
827 return
823 starts = (c.start for c in completions)
828 starts = (c.start for c in completions)
824 ends = (c.end for c in completions)
829 ends = (c.end for c in completions)
825
830
826 new_start = min(starts)
831 new_start = min(starts)
827 new_end = max(ends)
832 new_end = max(ends)
828
833
829 seen_jedi = set()
834 seen_jedi = set()
830 seen_python_matches = set()
835 seen_python_matches = set()
831 for c in completions:
836 for c in completions:
832 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
837 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
833 if c._origin == 'jedi':
838 if c._origin == 'jedi':
834 seen_jedi.add(new_text)
839 seen_jedi.add(new_text)
835 elif c._origin == 'IPCompleter.python_matches':
840 elif c._origin == 'IPCompleter.python_matches':
836 seen_python_matches.add(new_text)
841 seen_python_matches.add(new_text)
837 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
842 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
838 diff = seen_python_matches.difference(seen_jedi)
843 diff = seen_python_matches.difference(seen_jedi)
839 if diff and _debug:
844 if diff and _debug:
840 print('IPython.python matches have extras:', diff)
845 print('IPython.python matches have extras:', diff)
841
846
842
847
843 if sys.platform == 'win32':
848 if sys.platform == 'win32':
844 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
849 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
845 else:
850 else:
846 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
851 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
847
852
848 GREEDY_DELIMS = ' =\r\n'
853 GREEDY_DELIMS = ' =\r\n'
849
854
850
855
851 class CompletionSplitter(object):
856 class CompletionSplitter(object):
852 """An object to split an input line in a manner similar to readline.
857 """An object to split an input line in a manner similar to readline.
853
858
854 By having our own implementation, we can expose readline-like completion in
859 By having our own implementation, we can expose readline-like completion in
855 a uniform manner to all frontends. This object only needs to be given the
860 a uniform manner to all frontends. This object only needs to be given the
856 line of text to be split and the cursor position on said line, and it
861 line of text to be split and the cursor position on said line, and it
857 returns the 'word' to be completed on at the cursor after splitting the
862 returns the 'word' to be completed on at the cursor after splitting the
858 entire line.
863 entire line.
859
864
860 What characters are used as splitting delimiters can be controlled by
865 What characters are used as splitting delimiters can be controlled by
861 setting the ``delims`` attribute (this is a property that internally
866 setting the ``delims`` attribute (this is a property that internally
862 automatically builds the necessary regular expression)"""
867 automatically builds the necessary regular expression)"""
863
868
864 # Private interface
869 # Private interface
865
870
866 # A string of delimiter characters. The default value makes sense for
871 # A string of delimiter characters. The default value makes sense for
867 # IPython's most typical usage patterns.
872 # IPython's most typical usage patterns.
868 _delims = DELIMS
873 _delims = DELIMS
869
874
870 # The expression (a normal string) to be compiled into a regular expression
875 # The expression (a normal string) to be compiled into a regular expression
871 # for actual splitting. We store it as an attribute mostly for ease of
876 # for actual splitting. We store it as an attribute mostly for ease of
872 # debugging, since this type of code can be so tricky to debug.
877 # debugging, since this type of code can be so tricky to debug.
873 _delim_expr = None
878 _delim_expr = None
874
879
875 # The regular expression that does the actual splitting
880 # The regular expression that does the actual splitting
876 _delim_re = None
881 _delim_re = None
877
882
878 def __init__(self, delims=None):
883 def __init__(self, delims=None):
879 delims = CompletionSplitter._delims if delims is None else delims
884 delims = CompletionSplitter._delims if delims is None else delims
880 self.delims = delims
885 self.delims = delims
881
886
882 @property
887 @property
883 def delims(self):
888 def delims(self):
884 """Return the string of delimiter characters."""
889 """Return the string of delimiter characters."""
885 return self._delims
890 return self._delims
886
891
887 @delims.setter
892 @delims.setter
888 def delims(self, delims):
893 def delims(self, delims):
889 """Set the delimiters for line splitting."""
894 """Set the delimiters for line splitting."""
890 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
895 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
891 self._delim_re = re.compile(expr)
896 self._delim_re = re.compile(expr)
892 self._delims = delims
897 self._delims = delims
893 self._delim_expr = expr
898 self._delim_expr = expr
894
899
895 def split_line(self, line, cursor_pos=None):
900 def split_line(self, line, cursor_pos=None):
896 """Split a line of text with a cursor at the given position.
901 """Split a line of text with a cursor at the given position.
897 """
902 """
898 l = line if cursor_pos is None else line[:cursor_pos]
903 l = line if cursor_pos is None else line[:cursor_pos]
899 return self._delim_re.split(l)[-1]
904 return self._delim_re.split(l)[-1]
900
905
901
906
902
907
903 class Completer(Configurable):
908 class Completer(Configurable):
904
909
905 greedy = Bool(False,
910 greedy = Bool(
906 help="""Activate greedy completion
911 False,
907 PENDING DEPRECATION. this is now mostly taken care of with Jedi.
912 help="""Activate greedy completion.
913
914 .. deprecated:: 8.8
915 Use :any:`evaluation` instead.
916
917 As of IPython 8.8 proxy for ``evaluation = 'unsafe'`` when set to ``True``,
918 and for ``'forbidden'`` when set to ``False``.
919 """,
920 ).tag(config=True)
908
921
909 This will enable completion on elements of lists, results of function calls, etc.,
922 evaluation = Enum(
910 but can be unsafe because the code is actually evaluated on TAB.
923 ('forbidden', 'minimal', 'limitted', 'unsafe', 'dangerous'),
924 default_value='limitted',
925 help="""Code evaluation under completion.
926
927 Successive options allow to enable more eager evaluation for more accurate completion suggestions,
928 including for nested dictionaries, nested lists, or even results of function calls. Setting `unsafe`
929 or higher can lead to evaluation of arbitrary user code on TAB with potentially dangerous side effects.
930
931 Allowed values are:
932 - `forbidden`: no evaluation at all
933 - `minimal`: evaluation of literals and access to built-in namespaces; no item/attribute evaluation nor access to locals/globals
934 - `limitted` (default): access to all namespaces, evaluation of hard-coded methods (``keys()``, ``__getattr__``, ``__getitems__``, etc) on allow-listed objects (e.g. ``dict``, ``list``, ``tuple``, ``pandas.Series``)
935 - `unsafe`: evaluation of all methods and function calls but not of syntax with side-effects like `del x`,
936 - `dangerous`: completely arbitrary evaluation
911 """,
937 """,
912 ).tag(config=True)
938 ).tag(config=True)
913
939
914 use_jedi = Bool(default_value=JEDI_INSTALLED,
940 use_jedi = Bool(default_value=JEDI_INSTALLED,
915 help="Experimental: Use Jedi to generate autocompletions. "
941 help="Experimental: Use Jedi to generate autocompletions. "
916 "Default to True if jedi is installed.").tag(config=True)
942 "Default to True if jedi is installed.").tag(config=True)
917
943
918 jedi_compute_type_timeout = Int(default_value=400,
944 jedi_compute_type_timeout = Int(default_value=400,
919 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
945 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
920 Set to 0 to stop computing types. Non-zero value lower than 100ms may hurt
946 Set to 0 to stop computing types. Non-zero value lower than 100ms may hurt
921 performance by preventing jedi to build its cache.
947 performance by preventing jedi to build its cache.
922 """).tag(config=True)
948 """).tag(config=True)
923
949
924 debug = Bool(default_value=False,
950 debug = Bool(default_value=False,
925 help='Enable debug for the Completer. Mostly print extra '
951 help='Enable debug for the Completer. Mostly print extra '
926 'information for experimental jedi integration.')\
952 'information for experimental jedi integration.')\
927 .tag(config=True)
953 .tag(config=True)
928
954
929 backslash_combining_completions = Bool(True,
955 backslash_combining_completions = Bool(True,
930 help="Enable unicode completions, e.g. \\alpha<tab> . "
956 help="Enable unicode completions, e.g. \\alpha<tab> . "
931 "Includes completion of latex commands, unicode names, and expanding "
957 "Includes completion of latex commands, unicode names, and expanding "
932 "unicode characters back to latex commands.").tag(config=True)
958 "unicode characters back to latex commands.").tag(config=True)
933
959
934 def __init__(self, namespace=None, global_namespace=None, **kwargs):
960 def __init__(self, namespace=None, global_namespace=None, **kwargs):
935 """Create a new completer for the command line.
961 """Create a new completer for the command line.
936
962
937 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
963 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
938
964
939 If unspecified, the default namespace where completions are performed
965 If unspecified, the default namespace where completions are performed
940 is __main__ (technically, __main__.__dict__). Namespaces should be
966 is __main__ (technically, __main__.__dict__). Namespaces should be
941 given as dictionaries.
967 given as dictionaries.
942
968
943 An optional second namespace can be given. This allows the completer
969 An optional second namespace can be given. This allows the completer
944 to handle cases where both the local and global scopes need to be
970 to handle cases where both the local and global scopes need to be
945 distinguished.
971 distinguished.
946 """
972 """
947
973
948 # Don't bind to namespace quite yet, but flag whether the user wants a
974 # Don't bind to namespace quite yet, but flag whether the user wants a
949 # specific namespace or to use __main__.__dict__. This will allow us
975 # specific namespace or to use __main__.__dict__. This will allow us
950 # to bind to __main__.__dict__ at completion time, not now.
976 # to bind to __main__.__dict__ at completion time, not now.
951 if namespace is None:
977 if namespace is None:
952 self.use_main_ns = True
978 self.use_main_ns = True
953 else:
979 else:
954 self.use_main_ns = False
980 self.use_main_ns = False
955 self.namespace = namespace
981 self.namespace = namespace
956
982
957 # The global namespace, if given, can be bound directly
983 # The global namespace, if given, can be bound directly
958 if global_namespace is None:
984 if global_namespace is None:
959 self.global_namespace = {}
985 self.global_namespace = {}
960 else:
986 else:
961 self.global_namespace = global_namespace
987 self.global_namespace = global_namespace
962
988
963 self.custom_matchers = []
989 self.custom_matchers = []
964
990
965 super(Completer, self).__init__(**kwargs)
991 super(Completer, self).__init__(**kwargs)
966
992
967 def complete(self, text, state):
993 def complete(self, text, state):
968 """Return the next possible completion for 'text'.
994 """Return the next possible completion for 'text'.
969
995
970 This is called successively with state == 0, 1, 2, ... until it
996 This is called successively with state == 0, 1, 2, ... until it
971 returns None. The completion should begin with 'text'.
997 returns None. The completion should begin with 'text'.
972
998
973 """
999 """
974 if self.use_main_ns:
1000 if self.use_main_ns:
975 self.namespace = __main__.__dict__
1001 self.namespace = __main__.__dict__
976
1002
977 if state == 0:
1003 if state == 0:
978 if "." in text:
1004 if "." in text:
979 self.matches = self.attr_matches(text)
1005 self.matches = self.attr_matches(text)
980 else:
1006 else:
981 self.matches = self.global_matches(text)
1007 self.matches = self.global_matches(text)
982 try:
1008 try:
983 return self.matches[state]
1009 return self.matches[state]
984 except IndexError:
1010 except IndexError:
985 return None
1011 return None
986
1012
987 def global_matches(self, text):
1013 def global_matches(self, text):
988 """Compute matches when text is a simple name.
1014 """Compute matches when text is a simple name.
989
1015
990 Return a list of all keywords, built-in functions and names currently
1016 Return a list of all keywords, built-in functions and names currently
991 defined in self.namespace or self.global_namespace that match.
1017 defined in self.namespace or self.global_namespace that match.
992
1018
993 """
1019 """
994 matches = []
1020 matches = []
995 match_append = matches.append
1021 match_append = matches.append
996 n = len(text)
1022 n = len(text)
997 for lst in [
1023 for lst in [
998 keyword.kwlist,
1024 keyword.kwlist,
999 builtin_mod.__dict__.keys(),
1025 builtin_mod.__dict__.keys(),
1000 list(self.namespace.keys()),
1026 list(self.namespace.keys()),
1001 list(self.global_namespace.keys()),
1027 list(self.global_namespace.keys()),
1002 ]:
1028 ]:
1003 for word in lst:
1029 for word in lst:
1004 if word[:n] == text and word != "__builtins__":
1030 if word[:n] == text and word != "__builtins__":
1005 match_append(word)
1031 match_append(word)
1006
1032
1007 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
1033 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
1008 for lst in [list(self.namespace.keys()), list(self.global_namespace.keys())]:
1034 for lst in [list(self.namespace.keys()), list(self.global_namespace.keys())]:
1009 shortened = {
1035 shortened = {
1010 "_".join([sub[0] for sub in word.split("_")]): word
1036 "_".join([sub[0] for sub in word.split("_")]): word
1011 for word in lst
1037 for word in lst
1012 if snake_case_re.match(word)
1038 if snake_case_re.match(word)
1013 }
1039 }
1014 for word in shortened.keys():
1040 for word in shortened.keys():
1015 if word[:n] == text and word != "__builtins__":
1041 if word[:n] == text and word != "__builtins__":
1016 match_append(shortened[word])
1042 match_append(shortened[word])
1017 return matches
1043 return matches
1018
1044
1019 def attr_matches(self, text):
1045 def attr_matches(self, text):
1020 """Compute matches when text contains a dot.
1046 """Compute matches when text contains a dot.
1021
1047
1022 Assuming the text is of the form NAME.NAME....[NAME], and is
1048 Assuming the text is of the form NAME.NAME....[NAME], and is
1023 evaluatable in self.namespace or self.global_namespace, it will be
1049 evaluatable in self.namespace or self.global_namespace, it will be
1024 evaluated and its attributes (as revealed by dir()) are used as
1050 evaluated and its attributes (as revealed by dir()) are used as
1025 possible completions. (For class instances, class members are
1051 possible completions. (For class instances, class members are
1026 also considered.)
1052 also considered.)
1027
1053
1028 WARNING: this can still invoke arbitrary C code, if an object
1054 WARNING: this can still invoke arbitrary C code, if an object
1029 with a __getattr__ hook is evaluated.
1055 with a __getattr__ hook is evaluated.
1030
1056
1031 """
1057 """
1058 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
1059 if not m2:
1060 return []
1061 expr, attr = m2.group(1,2)
1032
1062
1033 # Another option, seems to work great. Catches things like ''.<tab>
1063 obj = self._evaluate_expr(expr)
1034 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
1035
1064
1036 if m:
1065 if obj is not_found:
1037 expr, attr = m.group(1, 3)
1038 elif self.greedy:
1039 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
1040 if not m2:
1041 return []
1042 expr, attr = m2.group(1,2)
1043 else:
1044 return []
1066 return []
1045
1067
1046 try:
1047 obj = eval(expr, self.namespace)
1048 except:
1049 try:
1050 obj = eval(expr, self.global_namespace)
1051 except:
1052 return []
1053
1054 if self.limit_to__all__ and hasattr(obj, '__all__'):
1068 if self.limit_to__all__ and hasattr(obj, '__all__'):
1055 words = get__all__entries(obj)
1069 words = get__all__entries(obj)
1056 else:
1070 else:
1057 words = dir2(obj)
1071 words = dir2(obj)
1058
1072
1059 try:
1073 try:
1060 words = generics.complete_object(obj, words)
1074 words = generics.complete_object(obj, words)
1061 except TryNext:
1075 except TryNext:
1062 pass
1076 pass
1063 except AssertionError:
1077 except AssertionError:
1064 raise
1078 raise
1065 except Exception:
1079 except Exception:
1066 # Silence errors from completion function
1080 # Silence errors from completion function
1067 #raise # dbg
1081 #raise # dbg
1068 pass
1082 pass
1069 # Build match list to return
1083 # Build match list to return
1070 n = len(attr)
1084 n = len(attr)
1071 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
1085 return ["%s.%s" % (expr, w) for w in words if w[:n] == attr ]
1072
1086
1073
1087
1088 def _evaluate_expr(self, expr):
1089 obj = not_found
1090 done = False
1091 while not done and expr:
1092 try:
1093 obj = guarded_eval(
1094 expr,
1095 EvaluationContext(
1096 globals_=self.global_namespace,
1097 locals_=self.namespace,
1098 evaluation=self.evaluation
1099 )
1100 )
1101 done = True
1102 except Exception as e:
1103 if self.debug:
1104 print('Evaluation exception', e)
1105 # trim the expression to remove any invalid prefix
1106 # e.g. user starts `(d[`, so we get `expr = '(d'`,
1107 # where parenthesis is not closed.
1108 # TODO: make this faster by reusing parts of the computation?
1109 expr = expr[1:]
1110 return obj
1111
1074 def get__all__entries(obj):
1112 def get__all__entries(obj):
1075 """returns the strings in the __all__ attribute"""
1113 """returns the strings in the __all__ attribute"""
1076 try:
1114 try:
1077 words = getattr(obj, '__all__')
1115 words = getattr(obj, '__all__')
1078 except:
1116 except:
1079 return []
1117 return []
1080
1118
1081 return [w for w in words if isinstance(w, str)]
1119 return [w for w in words if isinstance(w, str)]
1082
1120
1083
1121
def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes], ...]]], prefix: str, delims: str,
                    extra_prefix: Optional[Tuple[Union[str, bytes], ...]]=None) -> Tuple[str, int, List[str]]:
    """Used by dict_key_matches, matching the prefix to a list of keys

    Parameters
    ----------
    keys
        list of keys in dictionary currently being completed.
    prefix
        Part of the text already typed by the user. E.g. `mydict[b'fo`
    delims
        String of delimiters to consider when finding the current key.
    extra_prefix : optional
        Part of the text already typed in multi-key index cases. E.g. for
        `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.

    Returns
    -------
    A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
    ``quote`` being the quote that need to be used to close current string.
    ``token_start`` the position where the replacement should start occurring,
    ``matches`` a list of replacement/completion
    """
    prior_keys = extra_prefix if extra_prefix else ()
    n_prior = len(prior_keys)
    serializable = (str, bytes, int, float, slice)

    def tuple_key_matches(key) -> bool:
        # A tuple key must be longer than the tuple of keys already typed.
        if len(key) <= n_prior:
            return False
        # Every component must be representable as text.
        if any(not isinstance(part, serializable) for part in key):
            return False
        # Components already typed must agree; a slice matches anything.
        return all(
            part == typed or isinstance(typed, slice)
            for part, typed in zip(key, prior_keys)
        )

    candidates: List[Union[str, bytes, int, float, slice]] = []
    for key in keys:
        if isinstance(key, tuple):
            if tuple_key_matches(key):
                nxt = key[n_prior]
                if isinstance(nxt, serializable):
                    candidates.append(nxt)
        elif isinstance(key, serializable):
            candidates.append(key)

    if not prefix:
        return '', 0, [repr(k) for k in candidates]

    quote_match = re.search('["\']', prefix)
    assert quote_match is not None  # silence mypy
    quote = quote_match.group()
    try:
        prefix_str = literal_eval(prefix + quote)
    except Exception:
        return '', 0, []

    # Locate the token being completed: the trailing run of non-delimiters.
    pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
    token_match = re.search(pattern, prefix, re.UNICODE)
    assert token_match is not None  # silence mypy
    token_start = token_match.start()
    token_prefix = token_match.group()

    matched: List[str] = []
    for key in candidates:
        as_text = key if isinstance(key, (str, bytes)) else str(key)
        try:
            if not as_text.startswith(prefix_str):
                continue
        except (AttributeError, TypeError, UnicodeError):
            # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
            continue

        # Reformat the remainder of the key to begin with the typed prefix.
        rem = as_text[len(prefix_str):]
        # Force a repr wrapped in ' by appending a " before repr-ing.
        rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
        rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
        if quote == '"':
            # The entered prefix is quoted with ",
            # but the match is quoted with '.
            # A contained " hence needs escaping for comparison:
            rem_repr = rem_repr.replace('"', '\\"')

        # Then reinsert the prefix from the start of the token.
        matched.append('%s%s' % (token_prefix, rem_repr))
    return quote, token_start, matched
1176
1218
1177
1219
def cursor_to_position(text:str, line:int, column:int)->int:
    """
    Convert a (line, column) cursor location in ``text`` to a flat offset.

    Parameters
    ----------
    text : str
        The text in which to calculate the cursor offset
    line : int
        Line of the cursor; 0-indexed
    column : int
        Column of the cursor 0-indexed

    Returns
    -------
    Position of the cursor in ``text``, 0-indexed.

    See Also
    --------
    position_to_cursor : reciprocal of this function
    """
    split_lines = text.split('\n')
    assert line <= len(split_lines), '{} <= {}'.format(str(line), str(len(split_lines)))

    # Each preceding line contributes its length plus one for the '\n'.
    offset = column
    for preceding in split_lines[:line]:
        offset += len(preceding) + 1
    return offset
1205
1247
def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
    """
    Convert a flat 0-indexed ``offset`` in ``text`` to a (line, column) pair,
    both 0-indexed.

    Position should be a valid position in ``text``.

    Parameters
    ----------
    text : str
        The text in which to calculate the cursor offset
    offset : int
        Position of the cursor in ``text``, 0-indexed.

    Returns
    -------
    (line, column) : (int, int)
        Line of the cursor; 0-indexed, column of the cursor 0-indexed

    See Also
    --------
    cursor_to_position : reciprocal of this function
    """
    assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))

    prefix = text[:offset]
    # The line is the number of newlines before the offset.
    line = prefix.count('\n')
    # The column is the number of characters after the last newline;
    # rfind returns -1 when there is none, so this is the whole prefix then.
    col = len(prefix) - (prefix.rfind('\n') + 1)
    return line, col
1238
1280
1239
1281
1240 def _safe_isinstance(obj, module, class_name):
1282 def _safe_isinstance(obj, module, class_name, *attrs):
1241 """Checks if obj is an instance of module.class_name if loaded
1283 """Checks if obj is an instance of module.class_name if loaded
1242 """
1284 """
1243 return (module in sys.modules and
1285 if module in sys.modules:
1244 isinstance(obj, getattr(import_module(module), class_name)))
1286 m = sys.modules[module]
1287 for attr in [class_name, *attrs]:
1288 m = getattr(m, attr)
1289 return isinstance(obj, m)
1245
1290
1246
1291
@context_matcher()
def back_unicode_name_matcher(context: CompletionContext):
    """Match Unicode characters back to Unicode name

    New-API (v2) wrapper around :any:`back_unicode_name_matches`.
    """
    text = context.text_until_cursor
    fragment, matches = back_unicode_name_matches(text)
    result = _convert_matcher_v1_result_to_v2(
        matches, type="unicode", fragment=fragment, suppress_if_matches=True
    )
    return result
1257
1302
1258
1303
def back_unicode_name_matches(text: str) -> Tuple[str, Sequence[str]]:
    """Match Unicode characters back to Unicode name

    This does ``β˜ƒ`` -> ``\\snowman``

    Note that snowman is not a valid python3 combining character but will be expanded.
    Though it will not recombine back to the snowman character by the completion machinery.

    This will not either back-complete standard sequences like \\n, \\b ...

    .. deprecated:: 8.6
        You can use :meth:`back_unicode_name_matcher` instead.

    Returns
    =======

    Return a tuple with two elements:

    - The Unicode character that was matched (preceded with a backslash), or
      empty string,
    - a sequence (of 1), name for the match Unicode character, preceded by
      backslash, or empty if no match.
    """
    if len(text) < 2:
        return '', ()
    maybe_slash = text[-2]
    if maybe_slash != '\\':
        return '', ()

    char = text[-1]
    # no expand on quote for completion in strings.
    # nor backcomplete standard ascii keys
    if char in string.ascii_letters or char in ('"', "'"):
        return '', ()
    try:
        unic = unicodedata.name(char)
        return '\\' + char, ('\\' + unic,)
    except (KeyError, ValueError):
        # unicodedata.name raises ValueError (not KeyError) for characters
        # with no name, e.g. control characters; catching only KeyError let
        # that exception escape and crash completion.
        pass
    return '', ()
1299
1344
1300
1345
@context_matcher()
def back_latex_name_matcher(context: CompletionContext):
    """Match latex characters back to unicode name

    New-API (v2) wrapper around :any:`back_latex_name_matches`.
    """
    text = context.text_until_cursor
    fragment, matches = back_latex_name_matches(text)
    result = _convert_matcher_v1_result_to_v2(
        matches, type="latex", fragment=fragment, suppress_if_matches=True
    )
    return result
1311
1356
1312
1357
def back_latex_name_matches(text: str) -> Tuple[str, Sequence[str]]:
    """Match latex characters back to unicode name

    This does ``\\β„΅`` -> ``\\aleph``

    .. deprecated:: 8.6
        You can use :meth:`back_latex_name_matcher` instead.
    """
    # Need at least a backslash followed by one character.
    if len(text) < 2 or text[-2] != '\\':
        return '', ()

    char = text[-1]
    # no expand on quote for completion in strings.
    # nor backcomplete standard ascii keys
    if char in string.ascii_letters or char in ('"', "'"):
        return '', ()
    try:
        latex = reverse_latex_symbol[char]
    except KeyError:
        return '', ()
    # '\\' replace the \ as well
    return '\\' + char, [latex]
1340
1385
1341
1386
1342 def _formatparamchildren(parameter) -> str:
1387 def _formatparamchildren(parameter) -> str:
1343 """
1388 """
1344 Get parameter name and value from Jedi Private API
1389 Get parameter name and value from Jedi Private API
1345
1390
1346 Jedi does not expose a simple way to get `param=value` from its API.
1391 Jedi does not expose a simple way to get `param=value` from its API.
1347
1392
1348 Parameters
1393 Parameters
1349 ----------
1394 ----------
1350 parameter
1395 parameter
1351 Jedi's function `Param`
1396 Jedi's function `Param`
1352
1397
1353 Returns
1398 Returns
1354 -------
1399 -------
1355 A string like 'a', 'b=1', '*args', '**kwargs'
1400 A string like 'a', 'b=1', '*args', '**kwargs'
1356
1401
1357 """
1402 """
1358 description = parameter.description
1403 description = parameter.description
1359 if not description.startswith('param '):
1404 if not description.startswith('param '):
1360 raise ValueError('Jedi function parameter description have change format.'
1405 raise ValueError('Jedi function parameter description have change format.'
1361 'Expected "param ...", found %r".' % description)
1406 'Expected "param ...", found %r".' % description)
1362 return description[6:]
1407 return description[6:]
1363
1408
1364 def _make_signature(completion)-> str:
1409 def _make_signature(completion)-> str:
1365 """
1410 """
1366 Make the signature from a jedi completion
1411 Make the signature from a jedi completion
1367
1412
1368 Parameters
1413 Parameters
1369 ----------
1414 ----------
1370 completion : jedi.Completion
1415 completion : jedi.Completion
1371 object does not complete a function type
1416 object does not complete a function type
1372
1417
1373 Returns
1418 Returns
1374 -------
1419 -------
1375 a string consisting of the function signature, with the parenthesis but
1420 a string consisting of the function signature, with the parenthesis but
1376 without the function name. example:
1421 without the function name. example:
1377 `(a, *args, b=1, **kwargs)`
1422 `(a, *args, b=1, **kwargs)`
1378
1423
1379 """
1424 """
1380
1425
1381 # it looks like this might work on jedi 0.17
1426 # it looks like this might work on jedi 0.17
1382 if hasattr(completion, 'get_signatures'):
1427 if hasattr(completion, 'get_signatures'):
1383 signatures = completion.get_signatures()
1428 signatures = completion.get_signatures()
1384 if not signatures:
1429 if not signatures:
1385 return '(?)'
1430 return '(?)'
1386
1431
1387 c0 = completion.get_signatures()[0]
1432 c0 = completion.get_signatures()[0]
1388 return '('+c0.to_string().split('(', maxsplit=1)[1]
1433 return '('+c0.to_string().split('(', maxsplit=1)[1]
1389
1434
1390 return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
1435 return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
1391 for p in signature.defined_names()) if f])
1436 for p in signature.defined_names()) if f])
1392
1437
1393
1438
# Aggregate completion result: maps a matcher identifier (str) to that
# matcher's MatcherResult — presumably one entry per matcher that ran;
# verify against the `_complete` implementation.
_CompleteResult = Dict[str, MatcherResult]
1395
1440
1396
1441
# NOTE(review): based on its groups, this pattern splits a subscript
# expression such as ``obj["a", 'b`` into (object expression, already-closed
# keys, current unclosed key) — confirm against the dict-key matcher usage.
DICT_MATCHER_REGEX = re.compile(r"""(?x)
(  # match dict-referring - or any get item object - expression
    .+
)
\[   # open bracket
\s*  # and optional whitespace
# Capture any number of serializable objects (e.g. "a", "b", 'c')
# and slices
((?:[uUbB]?  # string prefix (r not handled)
    (?:
        '(?:[^']|(?<!\\)\\')*'
        |
        "(?:[^"]|(?<!\\)\\")*"
        |
        # capture integers and slices
        (?:[-+]?\d+)?(?::(?:[-+]?\d+)?){0,2}
    )
    \s*,\s*
)*)
([uUbB]?  # string prefix (r not handled)
    (?:  # unclosed string
        '(?:[^']|(?<!\\)\\')*
        |
        "(?:[^"]|(?<!\\)\\")*
        |
        (?:[-+]?\d+)
    )
)?
$
""")
1472
def _convert_matcher_v1_result_to_v2(
    matches: Sequence[str],
    type: str,
    fragment: Optional[str] = None,
    suppress_if_matches: bool = False,
) -> SimpleMatcherResult:
    """Utility to help with transition from v1 to v2 matcher API.

    Parameters
    ----------
    matches
        Completion texts produced by a v1-style matcher.
    type
        Type label attached to every completion (e.g. ``"unicode"``).
    fragment
        Text fragment the matches apply to, when known.  The default was
        previously annotated as ``str`` despite being ``None``; fixed to
        ``Optional[str]``.
    suppress_if_matches
        When True, request suppression of other matchers whenever any
        match was produced.
    """
    result = {
        "completions": [SimpleCompletion(text=match, type=type) for match in matches],
        # Only suppress competing matchers if this one produced something.
        "suppress": bool(matches) if suppress_if_matches else False,
    }
    if fragment is not None:
        result["matched_fragment"] = fragment
    return result
1411
1487
1412
1488
1413 class IPCompleter(Completer):
1489 class IPCompleter(Completer):
1414 """Extension of the completer class with IPython-specific features"""
1490 """Extension of the completer class with IPython-specific features"""
1415
1491
1416 __dict_key_regexps: Optional[Dict[bool,Pattern]] = None
1417
1418 @observe('greedy')
1492 @observe('greedy')
1419 def _greedy_changed(self, change):
1493 def _greedy_changed(self, change):
1420 """update the splitter and readline delims when greedy is changed"""
1494 """update the splitter and readline delims when greedy is changed"""
1421 if change['new']:
1495 if change['new']:
1496 self.evaluation = 'unsafe'
1422 self.splitter.delims = GREEDY_DELIMS
1497 self.splitter.delims = GREEDY_DELIMS
1423 else:
1498 else:
1499 self.evaluation = 'limitted'
1424 self.splitter.delims = DELIMS
1500 self.splitter.delims = DELIMS
1425
1501
1426 dict_keys_only = Bool(
1502 dict_keys_only = Bool(
1427 False,
1503 False,
1428 help="""
1504 help="""
1429 Whether to show dict key matches only.
1505 Whether to show dict key matches only.
1430
1506
1431 (disables all matchers except for `IPCompleter.dict_key_matcher`).
1507 (disables all matchers except for `IPCompleter.dict_key_matcher`).
1432 """,
1508 """,
1433 )
1509 )
1434
1510
1435 suppress_competing_matchers = UnionTrait(
1511 suppress_competing_matchers = UnionTrait(
1436 [Bool(allow_none=True), DictTrait(Bool(None, allow_none=True))],
1512 [Bool(allow_none=True), DictTrait(Bool(None, allow_none=True))],
1437 default_value=None,
1513 default_value=None,
1438 help="""
1514 help="""
1439 Whether to suppress completions from other *Matchers*.
1515 Whether to suppress completions from other *Matchers*.
1440
1516
1441 When set to ``None`` (default) the matchers will attempt to auto-detect
1517 When set to ``None`` (default) the matchers will attempt to auto-detect
1442 whether suppression of other matchers is desirable. For example, at
1518 whether suppression of other matchers is desirable. For example, at
1443 the beginning of a line followed by `%` we expect a magic completion
1519 the beginning of a line followed by `%` we expect a magic completion
1444 to be the only applicable option, and after ``my_dict['`` we usually
1520 to be the only applicable option, and after ``my_dict['`` we usually
1445 expect a completion with an existing dictionary key.
1521 expect a completion with an existing dictionary key.
1446
1522
1447 If you want to disable this heuristic and see completions from all matchers,
1523 If you want to disable this heuristic and see completions from all matchers,
1448 set ``IPCompleter.suppress_competing_matchers = False``.
1524 set ``IPCompleter.suppress_competing_matchers = False``.
1449 To disable the heuristic for specific matchers provide a dictionary mapping:
1525 To disable the heuristic for specific matchers provide a dictionary mapping:
1450 ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher': False}``.
1526 ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher': False}``.
1451
1527
1452 Set ``IPCompleter.suppress_competing_matchers = True`` to limit
1528 Set ``IPCompleter.suppress_competing_matchers = True`` to limit
1453 completions to the set of matchers with the highest priority;
1529 completions to the set of matchers with the highest priority;
1454 this is equivalent to ``IPCompleter.merge_completions`` and
1530 this is equivalent to ``IPCompleter.merge_completions`` and
1455 can be beneficial for performance, but will sometimes omit relevant
1531 can be beneficial for performance, but will sometimes omit relevant
1456 candidates from matchers further down the priority list.
1532 candidates from matchers further down the priority list.
1457 """,
1533 """,
1458 ).tag(config=True)
1534 ).tag(config=True)
1459
1535
1460 merge_completions = Bool(
1536 merge_completions = Bool(
1461 True,
1537 True,
1462 help="""Whether to merge completion results into a single list
1538 help="""Whether to merge completion results into a single list
1463
1539
1464 If False, only the completion results from the first non-empty
1540 If False, only the completion results from the first non-empty
1465 completer will be returned.
1541 completer will be returned.
1466
1542
1467 As of version 8.6.0, setting the value to ``False`` is an alias for:
1543 As of version 8.6.0, setting the value to ``False`` is an alias for:
1468 ``IPCompleter.suppress_competing_matchers = True.``.
1544 ``IPCompleter.suppress_competing_matchers = True.``.
1469 """,
1545 """,
1470 ).tag(config=True)
1546 ).tag(config=True)
1471
1547
1472 disable_matchers = ListTrait(
1548 disable_matchers = ListTrait(
1473 Unicode(),
1549 Unicode(),
1474 help="""List of matchers to disable.
1550 help="""List of matchers to disable.
1475
1551
1476 The list should contain matcher identifiers (see :any:`completion_matcher`).
1552 The list should contain matcher identifiers (see :any:`completion_matcher`).
1477 """,
1553 """,
1478 ).tag(config=True)
1554 ).tag(config=True)
1479
1555
1480 omit__names = Enum(
1556 omit__names = Enum(
1481 (0, 1, 2),
1557 (0, 1, 2),
1482 default_value=2,
1558 default_value=2,
1483 help="""Instruct the completer to omit private method names
1559 help="""Instruct the completer to omit private method names
1484
1560
1485 Specifically, when completing on ``object.<tab>``.
1561 Specifically, when completing on ``object.<tab>``.
1486
1562
1487 When 2 [default]: all names that start with '_' will be excluded.
1563 When 2 [default]: all names that start with '_' will be excluded.
1488
1564
1489 When 1: all 'magic' names (``__foo__``) will be excluded.
1565 When 1: all 'magic' names (``__foo__``) will be excluded.
1490
1566
1491 When 0: nothing will be excluded.
1567 When 0: nothing will be excluded.
1492 """
1568 """
1493 ).tag(config=True)
1569 ).tag(config=True)
1494 limit_to__all__ = Bool(False,
1570 limit_to__all__ = Bool(False,
1495 help="""
1571 help="""
1496 DEPRECATED as of version 5.0.
1572 DEPRECATED as of version 5.0.
1497
1573
1498 Instruct the completer to use __all__ for the completion
1574 Instruct the completer to use __all__ for the completion
1499
1575
1500 Specifically, when completing on ``object.<tab>``.
1576 Specifically, when completing on ``object.<tab>``.
1501
1577
1502 When True: only those names in obj.__all__ will be included.
1578 When True: only those names in obj.__all__ will be included.
1503
1579
1504 When False [default]: the __all__ attribute is ignored
1580 When False [default]: the __all__ attribute is ignored
1505 """,
1581 """,
1506 ).tag(config=True)
1582 ).tag(config=True)
1507
1583
1508 profile_completions = Bool(
1584 profile_completions = Bool(
1509 default_value=False,
1585 default_value=False,
1510 help="If True, emit profiling data for completion subsystem using cProfile."
1586 help="If True, emit profiling data for completion subsystem using cProfile."
1511 ).tag(config=True)
1587 ).tag(config=True)
1512
1588
1513 profiler_output_dir = Unicode(
1589 profiler_output_dir = Unicode(
1514 default_value=".completion_profiles",
1590 default_value=".completion_profiles",
1515 help="Template for path at which to output profile data for completions."
1591 help="Template for path at which to output profile data for completions."
1516 ).tag(config=True)
1592 ).tag(config=True)
1517
1593
1518 @observe('limit_to__all__')
1594 @observe('limit_to__all__')
1519 def _limit_to_all_changed(self, change):
1595 def _limit_to_all_changed(self, change):
1520 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
1596 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
1521 'value has been deprecated since IPython 5.0, will be made to have '
1597 'value has been deprecated since IPython 5.0, will be made to have '
1522 'no effects and then removed in future version of IPython.',
1598 'no effects and then removed in future version of IPython.',
1523 UserWarning)
1599 UserWarning)
1524
1600
1525 def __init__(
1601 def __init__(
1526 self, shell=None, namespace=None, global_namespace=None, config=None, **kwargs
1602 self, shell=None, namespace=None, global_namespace=None, config=None, **kwargs
1527 ):
1603 ):
1528 """IPCompleter() -> completer
1604 """IPCompleter() -> completer
1529
1605
1530 Return a completer object.
1606 Return a completer object.
1531
1607
1532 Parameters
1608 Parameters
1533 ----------
1609 ----------
1534 shell
1610 shell
1535 a pointer to the ipython shell itself. This is needed
1611 a pointer to the ipython shell itself. This is needed
1536 because this completer knows about magic functions, and those can
1612 because this completer knows about magic functions, and those can
1537 only be accessed via the ipython instance.
1613 only be accessed via the ipython instance.
1538 namespace : dict, optional
1614 namespace : dict, optional
1539 an optional dict where completions are performed.
1615 an optional dict where completions are performed.
1540 global_namespace : dict, optional
1616 global_namespace : dict, optional
1541 secondary optional dict for completions, to
1617 secondary optional dict for completions, to
1542 handle cases (such as IPython embedded inside functions) where
1618 handle cases (such as IPython embedded inside functions) where
1543 both Python scopes are visible.
1619 both Python scopes are visible.
1544 config : Config
1620 config : Config
1545 traitlet's config object
1621 traitlet's config object
1546 **kwargs
1622 **kwargs
1547 passed to super class unmodified.
1623 passed to super class unmodified.
1548 """
1624 """
1549
1625
1550 self.magic_escape = ESC_MAGIC
1626 self.magic_escape = ESC_MAGIC
1551 self.splitter = CompletionSplitter()
1627 self.splitter = CompletionSplitter()
1552
1628
1553 # _greedy_changed() depends on splitter and readline being defined:
1629 # _greedy_changed() depends on splitter and readline being defined:
1554 super().__init__(
1630 super().__init__(
1555 namespace=namespace,
1631 namespace=namespace,
1556 global_namespace=global_namespace,
1632 global_namespace=global_namespace,
1557 config=config,
1633 config=config,
1558 **kwargs,
1634 **kwargs,
1559 )
1635 )
1560
1636
1561 # List where completion matches will be stored
1637 # List where completion matches will be stored
1562 self.matches = []
1638 self.matches = []
1563 self.shell = shell
1639 self.shell = shell
1564 # Regexp to split filenames with spaces in them
1640 # Regexp to split filenames with spaces in them
1565 self.space_name_re = re.compile(r'([^\\] )')
1641 self.space_name_re = re.compile(r'([^\\] )')
1566 # Hold a local ref. to glob.glob for speed
1642 # Hold a local ref. to glob.glob for speed
1567 self.glob = glob.glob
1643 self.glob = glob.glob
1568
1644
1569 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1645 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1570 # buffers, to avoid completion problems.
1646 # buffers, to avoid completion problems.
1571 term = os.environ.get('TERM','xterm')
1647 term = os.environ.get('TERM','xterm')
1572 self.dumb_terminal = term in ['dumb','emacs']
1648 self.dumb_terminal = term in ['dumb','emacs']
1573
1649
1574 # Special handling of backslashes needed in win32 platforms
1650 # Special handling of backslashes needed in win32 platforms
1575 if sys.platform == "win32":
1651 if sys.platform == "win32":
1576 self.clean_glob = self._clean_glob_win32
1652 self.clean_glob = self._clean_glob_win32
1577 else:
1653 else:
1578 self.clean_glob = self._clean_glob
1654 self.clean_glob = self._clean_glob
1579
1655
1580 #regexp to parse docstring for function signature
1656 #regexp to parse docstring for function signature
1581 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1657 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1582 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1658 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1583 #use this if positional argument name is also needed
1659 #use this if positional argument name is also needed
1584 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1660 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1585
1661
1586 self.magic_arg_matchers = [
1662 self.magic_arg_matchers = [
1587 self.magic_config_matcher,
1663 self.magic_config_matcher,
1588 self.magic_color_matcher,
1664 self.magic_color_matcher,
1589 ]
1665 ]
1590
1666
1591 # This is set externally by InteractiveShell
1667 # This is set externally by InteractiveShell
1592 self.custom_completers = None
1668 self.custom_completers = None
1593
1669
1594 # This is a list of names of unicode characters that can be completed
1670 # This is a list of names of unicode characters that can be completed
1595 # into their corresponding unicode value. The list is large, so we
1671 # into their corresponding unicode value. The list is large, so we
1596 # lazily initialize it on first use. Consuming code should access this
1672 # lazily initialize it on first use. Consuming code should access this
1597 # attribute through the `@unicode_names` property.
1673 # attribute through the `@unicode_names` property.
1598 self._unicode_names = None
1674 self._unicode_names = None
1599
1675
1600 self._backslash_combining_matchers = [
1676 self._backslash_combining_matchers = [
1601 self.latex_name_matcher,
1677 self.latex_name_matcher,
1602 self.unicode_name_matcher,
1678 self.unicode_name_matcher,
1603 back_latex_name_matcher,
1679 back_latex_name_matcher,
1604 back_unicode_name_matcher,
1680 back_unicode_name_matcher,
1605 self.fwd_unicode_matcher,
1681 self.fwd_unicode_matcher,
1606 ]
1682 ]
1607
1683
1608 if not self.backslash_combining_completions:
1684 if not self.backslash_combining_completions:
1609 for matcher in self._backslash_combining_matchers:
1685 for matcher in self._backslash_combining_matchers:
1610 self.disable_matchers.append(matcher.matcher_identifier)
1686 self.disable_matchers.append(matcher.matcher_identifier)
1611
1687
1612 if not self.merge_completions:
1688 if not self.merge_completions:
1613 self.suppress_competing_matchers = True
1689 self.suppress_competing_matchers = True
1614
1690
1615 @property
1691 @property
1616 def matchers(self) -> List[Matcher]:
1692 def matchers(self) -> List[Matcher]:
1617 """All active matcher routines for completion"""
1693 """All active matcher routines for completion"""
1618 if self.dict_keys_only:
1694 if self.dict_keys_only:
1619 return [self.dict_key_matcher]
1695 return [self.dict_key_matcher]
1620
1696
1621 if self.use_jedi:
1697 if self.use_jedi:
1622 return [
1698 return [
1623 *self.custom_matchers,
1699 *self.custom_matchers,
1624 *self._backslash_combining_matchers,
1700 *self._backslash_combining_matchers,
1625 *self.magic_arg_matchers,
1701 *self.magic_arg_matchers,
1626 self.custom_completer_matcher,
1702 self.custom_completer_matcher,
1627 self.magic_matcher,
1703 self.magic_matcher,
1628 self._jedi_matcher,
1704 self._jedi_matcher,
1629 self.dict_key_matcher,
1705 self.dict_key_matcher,
1630 self.file_matcher,
1706 self.file_matcher,
1631 ]
1707 ]
1632 else:
1708 else:
1633 return [
1709 return [
1634 *self.custom_matchers,
1710 *self.custom_matchers,
1635 *self._backslash_combining_matchers,
1711 *self._backslash_combining_matchers,
1636 *self.magic_arg_matchers,
1712 *self.magic_arg_matchers,
1637 self.custom_completer_matcher,
1713 self.custom_completer_matcher,
1638 self.dict_key_matcher,
1714 self.dict_key_matcher,
1639 # TODO: convert python_matches to v2 API
1715 # TODO: convert python_matches to v2 API
1640 self.magic_matcher,
1716 self.magic_matcher,
1641 self.python_matches,
1717 self.python_matches,
1642 self.file_matcher,
1718 self.file_matcher,
1643 self.python_func_kw_matcher,
1719 self.python_func_kw_matcher,
1644 ]
1720 ]
1645
1721
1646 def all_completions(self, text:str) -> List[str]:
1722 def all_completions(self, text:str) -> List[str]:
1647 """
1723 """
1648 Wrapper around the completion methods for the benefit of emacs.
1724 Wrapper around the completion methods for the benefit of emacs.
1649 """
1725 """
1650 prefix = text.rpartition('.')[0]
1726 prefix = text.rpartition('.')[0]
1651 with provisionalcompleter():
1727 with provisionalcompleter():
1652 return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
1728 return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
1653 for c in self.completions(text, len(text))]
1729 for c in self.completions(text, len(text))]
1654
1730
1655 return self.complete(text)[1]
1731 return self.complete(text)[1]
1656
1732
1657 def _clean_glob(self, text:str):
1733 def _clean_glob(self, text:str):
1658 return self.glob("%s*" % text)
1734 return self.glob("%s*" % text)
1659
1735
1660 def _clean_glob_win32(self, text:str):
1736 def _clean_glob_win32(self, text:str):
1661 return [f.replace("\\","/")
1737 return [f.replace("\\","/")
1662 for f in self.glob("%s*" % text)]
1738 for f in self.glob("%s*" % text)]
1663
1739
1664 @context_matcher()
1740 @context_matcher()
1665 def file_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
1741 def file_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
1666 """Same as :any:`file_matches`, but adopted to new Matcher API."""
1742 """Same as :any:`file_matches`, but adopted to new Matcher API."""
1667 matches = self.file_matches(context.token)
1743 matches = self.file_matches(context.token)
1668 # TODO: add a heuristic for suppressing (e.g. if it has OS-specific delimiter,
1744 # TODO: add a heuristic for suppressing (e.g. if it has OS-specific delimiter,
1669 # starts with `/home/`, `C:\`, etc)
1745 # starts with `/home/`, `C:\`, etc)
1670 return _convert_matcher_v1_result_to_v2(matches, type="path")
1746 return _convert_matcher_v1_result_to_v2(matches, type="path")
1671
1747
1672 def file_matches(self, text: str) -> List[str]:
1748 def file_matches(self, text: str) -> List[str]:
1673 """Match filenames, expanding ~USER type strings.
1749 """Match filenames, expanding ~USER type strings.
1674
1750
1675 Most of the seemingly convoluted logic in this completer is an
1751 Most of the seemingly convoluted logic in this completer is an
1676 attempt to handle filenames with spaces in them. And yet it's not
1752 attempt to handle filenames with spaces in them. And yet it's not
1677 quite perfect, because Python's readline doesn't expose all of the
1753 quite perfect, because Python's readline doesn't expose all of the
1678 GNU readline details needed for this to be done correctly.
1754 GNU readline details needed for this to be done correctly.
1679
1755
1680 For a filename with a space in it, the printed completions will be
1756 For a filename with a space in it, the printed completions will be
1681 only the parts after what's already been typed (instead of the
1757 only the parts after what's already been typed (instead of the
1682 full completions, as is normally done). I don't think with the
1758 full completions, as is normally done). I don't think with the
1683 current (as of Python 2.3) Python readline it's possible to do
1759 current (as of Python 2.3) Python readline it's possible to do
1684 better.
1760 better.
1685
1761
1686 .. deprecated:: 8.6
1762 .. deprecated:: 8.6
1687 You can use :meth:`file_matcher` instead.
1763 You can use :meth:`file_matcher` instead.
1688 """
1764 """
1689
1765
1690 # chars that require escaping with backslash - i.e. chars
1766 # chars that require escaping with backslash - i.e. chars
1691 # that readline treats incorrectly as delimiters, but we
1767 # that readline treats incorrectly as delimiters, but we
1692 # don't want to treat as delimiters in filename matching
1768 # don't want to treat as delimiters in filename matching
1693 # when escaped with backslash
1769 # when escaped with backslash
1694 if text.startswith('!'):
1770 if text.startswith('!'):
1695 text = text[1:]
1771 text = text[1:]
1696 text_prefix = u'!'
1772 text_prefix = u'!'
1697 else:
1773 else:
1698 text_prefix = u''
1774 text_prefix = u''
1699
1775
1700 text_until_cursor = self.text_until_cursor
1776 text_until_cursor = self.text_until_cursor
1701 # track strings with open quotes
1777 # track strings with open quotes
1702 open_quotes = has_open_quotes(text_until_cursor)
1778 open_quotes = has_open_quotes(text_until_cursor)
1703
1779
1704 if '(' in text_until_cursor or '[' in text_until_cursor:
1780 if '(' in text_until_cursor or '[' in text_until_cursor:
1705 lsplit = text
1781 lsplit = text
1706 else:
1782 else:
1707 try:
1783 try:
1708 # arg_split ~ shlex.split, but with unicode bugs fixed by us
1784 # arg_split ~ shlex.split, but with unicode bugs fixed by us
1709 lsplit = arg_split(text_until_cursor)[-1]
1785 lsplit = arg_split(text_until_cursor)[-1]
1710 except ValueError:
1786 except ValueError:
1711 # typically an unmatched ", or backslash without escaped char.
1787 # typically an unmatched ", or backslash without escaped char.
1712 if open_quotes:
1788 if open_quotes:
1713 lsplit = text_until_cursor.split(open_quotes)[-1]
1789 lsplit = text_until_cursor.split(open_quotes)[-1]
1714 else:
1790 else:
1715 return []
1791 return []
1716 except IndexError:
1792 except IndexError:
1717 # tab pressed on empty line
1793 # tab pressed on empty line
1718 lsplit = ""
1794 lsplit = ""
1719
1795
1720 if not open_quotes and lsplit != protect_filename(lsplit):
1796 if not open_quotes and lsplit != protect_filename(lsplit):
1721 # if protectables are found, do matching on the whole escaped name
1797 # if protectables are found, do matching on the whole escaped name
1722 has_protectables = True
1798 has_protectables = True
1723 text0,text = text,lsplit
1799 text0,text = text,lsplit
1724 else:
1800 else:
1725 has_protectables = False
1801 has_protectables = False
1726 text = os.path.expanduser(text)
1802 text = os.path.expanduser(text)
1727
1803
1728 if text == "":
1804 if text == "":
1729 return [text_prefix + protect_filename(f) for f in self.glob("*")]
1805 return [text_prefix + protect_filename(f) for f in self.glob("*")]
1730
1806
1731 # Compute the matches from the filesystem
1807 # Compute the matches from the filesystem
1732 if sys.platform == 'win32':
1808 if sys.platform == 'win32':
1733 m0 = self.clean_glob(text)
1809 m0 = self.clean_glob(text)
1734 else:
1810 else:
1735 m0 = self.clean_glob(text.replace('\\', ''))
1811 m0 = self.clean_glob(text.replace('\\', ''))
1736
1812
1737 if has_protectables:
1813 if has_protectables:
1738 # If we had protectables, we need to revert our changes to the
1814 # If we had protectables, we need to revert our changes to the
1739 # beginning of filename so that we don't double-write the part
1815 # beginning of filename so that we don't double-write the part
1740 # of the filename we have so far
1816 # of the filename we have so far
1741 len_lsplit = len(lsplit)
1817 len_lsplit = len(lsplit)
1742 matches = [text_prefix + text0 +
1818 matches = [text_prefix + text0 +
1743 protect_filename(f[len_lsplit:]) for f in m0]
1819 protect_filename(f[len_lsplit:]) for f in m0]
1744 else:
1820 else:
1745 if open_quotes:
1821 if open_quotes:
1746 # if we have a string with an open quote, we don't need to
1822 # if we have a string with an open quote, we don't need to
1747 # protect the names beyond the quote (and we _shouldn't_, as
1823 # protect the names beyond the quote (and we _shouldn't_, as
1748 # it would cause bugs when the filesystem call is made).
1824 # it would cause bugs when the filesystem call is made).
1749 matches = m0 if sys.platform == "win32" else\
1825 matches = m0 if sys.platform == "win32" else\
1750 [protect_filename(f, open_quotes) for f in m0]
1826 [protect_filename(f, open_quotes) for f in m0]
1751 else:
1827 else:
1752 matches = [text_prefix +
1828 matches = [text_prefix +
1753 protect_filename(f) for f in m0]
1829 protect_filename(f) for f in m0]
1754
1830
1755 # Mark directories in input list by appending '/' to their names.
1831 # Mark directories in input list by appending '/' to their names.
1756 return [x+'/' if os.path.isdir(x) else x for x in matches]
1832 return [x+'/' if os.path.isdir(x) else x for x in matches]
1757
1833
1758 @context_matcher()
1834 @context_matcher()
1759 def magic_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
1835 def magic_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
1760 """Match magics."""
1836 """Match magics."""
1761 text = context.token
1837 text = context.token
1762 matches = self.magic_matches(text)
1838 matches = self.magic_matches(text)
1763 result = _convert_matcher_v1_result_to_v2(matches, type="magic")
1839 result = _convert_matcher_v1_result_to_v2(matches, type="magic")
1764 is_magic_prefix = len(text) > 0 and text[0] == "%"
1840 is_magic_prefix = len(text) > 0 and text[0] == "%"
1765 result["suppress"] = is_magic_prefix and bool(result["completions"])
1841 result["suppress"] = is_magic_prefix and bool(result["completions"])
1766 return result
1842 return result
1767
1843
1768 def magic_matches(self, text: str):
1844 def magic_matches(self, text: str):
1769 """Match magics.
1845 """Match magics.
1770
1846
1771 .. deprecated:: 8.6
1847 .. deprecated:: 8.6
1772 You can use :meth:`magic_matcher` instead.
1848 You can use :meth:`magic_matcher` instead.
1773 """
1849 """
1774 # Get all shell magics now rather than statically, so magics loaded at
1850 # Get all shell magics now rather than statically, so magics loaded at
1775 # runtime show up too.
1851 # runtime show up too.
1776 lsm = self.shell.magics_manager.lsmagic()
1852 lsm = self.shell.magics_manager.lsmagic()
1777 line_magics = lsm['line']
1853 line_magics = lsm['line']
1778 cell_magics = lsm['cell']
1854 cell_magics = lsm['cell']
1779 pre = self.magic_escape
1855 pre = self.magic_escape
1780 pre2 = pre+pre
1856 pre2 = pre+pre
1781
1857
1782 explicit_magic = text.startswith(pre)
1858 explicit_magic = text.startswith(pre)
1783
1859
1784 # Completion logic:
1860 # Completion logic:
1785 # - user gives %%: only do cell magics
1861 # - user gives %%: only do cell magics
1786 # - user gives %: do both line and cell magics
1862 # - user gives %: do both line and cell magics
1787 # - no prefix: do both
1863 # - no prefix: do both
1788 # In other words, line magics are skipped if the user gives %% explicitly
1864 # In other words, line magics are skipped if the user gives %% explicitly
1789 #
1865 #
1790 # We also exclude magics that match any currently visible names:
1866 # We also exclude magics that match any currently visible names:
1791 # https://github.com/ipython/ipython/issues/4877, unless the user has
1867 # https://github.com/ipython/ipython/issues/4877, unless the user has
1792 # typed a %:
1868 # typed a %:
1793 # https://github.com/ipython/ipython/issues/10754
1869 # https://github.com/ipython/ipython/issues/10754
1794 bare_text = text.lstrip(pre)
1870 bare_text = text.lstrip(pre)
1795 global_matches = self.global_matches(bare_text)
1871 global_matches = self.global_matches(bare_text)
1796 if not explicit_magic:
1872 if not explicit_magic:
1797 def matches(magic):
1873 def matches(magic):
1798 """
1874 """
1799 Filter magics, in particular remove magics that match
1875 Filter magics, in particular remove magics that match
1800 a name present in global namespace.
1876 a name present in global namespace.
1801 """
1877 """
1802 return ( magic.startswith(bare_text) and
1878 return ( magic.startswith(bare_text) and
1803 magic not in global_matches )
1879 magic not in global_matches )
1804 else:
1880 else:
1805 def matches(magic):
1881 def matches(magic):
1806 return magic.startswith(bare_text)
1882 return magic.startswith(bare_text)
1807
1883
1808 comp = [ pre2+m for m in cell_magics if matches(m)]
1884 comp = [ pre2+m for m in cell_magics if matches(m)]
1809 if not text.startswith(pre2):
1885 if not text.startswith(pre2):
1810 comp += [ pre+m for m in line_magics if matches(m)]
1886 comp += [ pre+m for m in line_magics if matches(m)]
1811
1887
1812 return comp
1888 return comp
1813
1889
1814 @context_matcher()
1890 @context_matcher()
1815 def magic_config_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
1891 def magic_config_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
1816 """Match class names and attributes for %config magic."""
1892 """Match class names and attributes for %config magic."""
1817 # NOTE: uses `line_buffer` equivalent for compatibility
1893 # NOTE: uses `line_buffer` equivalent for compatibility
1818 matches = self.magic_config_matches(context.line_with_cursor)
1894 matches = self.magic_config_matches(context.line_with_cursor)
1819 return _convert_matcher_v1_result_to_v2(matches, type="param")
1895 return _convert_matcher_v1_result_to_v2(matches, type="param")
1820
1896
1821 def magic_config_matches(self, text: str) -> List[str]:
1897 def magic_config_matches(self, text: str) -> List[str]:
1822 """Match class names and attributes for %config magic.
1898 """Match class names and attributes for %config magic.
1823
1899
1824 .. deprecated:: 8.6
1900 .. deprecated:: 8.6
1825 You can use :meth:`magic_config_matcher` instead.
1901 You can use :meth:`magic_config_matcher` instead.
1826 """
1902 """
1827 texts = text.strip().split()
1903 texts = text.strip().split()
1828
1904
1829 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
1905 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
1830 # get all configuration classes
1906 # get all configuration classes
1831 classes = sorted(set([ c for c in self.shell.configurables
1907 classes = sorted(set([ c for c in self.shell.configurables
1832 if c.__class__.class_traits(config=True)
1908 if c.__class__.class_traits(config=True)
1833 ]), key=lambda x: x.__class__.__name__)
1909 ]), key=lambda x: x.__class__.__name__)
1834 classnames = [ c.__class__.__name__ for c in classes ]
1910 classnames = [ c.__class__.__name__ for c in classes ]
1835
1911
1836 # return all classnames if config or %config is given
1912 # return all classnames if config or %config is given
1837 if len(texts) == 1:
1913 if len(texts) == 1:
1838 return classnames
1914 return classnames
1839
1915
1840 # match classname
1916 # match classname
1841 classname_texts = texts[1].split('.')
1917 classname_texts = texts[1].split('.')
1842 classname = classname_texts[0]
1918 classname = classname_texts[0]
1843 classname_matches = [ c for c in classnames
1919 classname_matches = [ c for c in classnames
1844 if c.startswith(classname) ]
1920 if c.startswith(classname) ]
1845
1921
1846 # return matched classes or the matched class with attributes
1922 # return matched classes or the matched class with attributes
1847 if texts[1].find('.') < 0:
1923 if texts[1].find('.') < 0:
1848 return classname_matches
1924 return classname_matches
1849 elif len(classname_matches) == 1 and \
1925 elif len(classname_matches) == 1 and \
1850 classname_matches[0] == classname:
1926 classname_matches[0] == classname:
1851 cls = classes[classnames.index(classname)].__class__
1927 cls = classes[classnames.index(classname)].__class__
1852 help = cls.class_get_help()
1928 help = cls.class_get_help()
1853 # strip leading '--' from cl-args:
1929 # strip leading '--' from cl-args:
1854 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
1930 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
1855 return [ attr.split('=')[0]
1931 return [ attr.split('=')[0]
1856 for attr in help.strip().splitlines()
1932 for attr in help.strip().splitlines()
1857 if attr.startswith(texts[1]) ]
1933 if attr.startswith(texts[1]) ]
1858 return []
1934 return []
1859
1935
1860 @context_matcher()
1936 @context_matcher()
1861 def magic_color_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
1937 def magic_color_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
1862 """Match color schemes for %colors magic."""
1938 """Match color schemes for %colors magic."""
1863 # NOTE: uses `line_buffer` equivalent for compatibility
1939 # NOTE: uses `line_buffer` equivalent for compatibility
1864 matches = self.magic_color_matches(context.line_with_cursor)
1940 matches = self.magic_color_matches(context.line_with_cursor)
1865 return _convert_matcher_v1_result_to_v2(matches, type="param")
1941 return _convert_matcher_v1_result_to_v2(matches, type="param")
1866
1942
1867 def magic_color_matches(self, text: str) -> List[str]:
1943 def magic_color_matches(self, text: str) -> List[str]:
1868 """Match color schemes for %colors magic.
1944 """Match color schemes for %colors magic.
1869
1945
1870 .. deprecated:: 8.6
1946 .. deprecated:: 8.6
1871 You can use :meth:`magic_color_matcher` instead.
1947 You can use :meth:`magic_color_matcher` instead.
1872 """
1948 """
1873 texts = text.split()
1949 texts = text.split()
1874 if text.endswith(' '):
1950 if text.endswith(' '):
1875 # .split() strips off the trailing whitespace. Add '' back
1951 # .split() strips off the trailing whitespace. Add '' back
1876 # so that: '%colors ' -> ['%colors', '']
1952 # so that: '%colors ' -> ['%colors', '']
1877 texts.append('')
1953 texts.append('')
1878
1954
1879 if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
1955 if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
1880 prefix = texts[1]
1956 prefix = texts[1]
1881 return [ color for color in InspectColors.keys()
1957 return [ color for color in InspectColors.keys()
1882 if color.startswith(prefix) ]
1958 if color.startswith(prefix) ]
1883 return []
1959 return []
1884
1960
1885 @context_matcher(identifier="IPCompleter.jedi_matcher")
1961 @context_matcher(identifier="IPCompleter.jedi_matcher")
1886 def _jedi_matcher(self, context: CompletionContext) -> _JediMatcherResult:
1962 def _jedi_matcher(self, context: CompletionContext) -> _JediMatcherResult:
1887 matches = self._jedi_matches(
1963 matches = self._jedi_matches(
1888 cursor_column=context.cursor_position,
1964 cursor_column=context.cursor_position,
1889 cursor_line=context.cursor_line,
1965 cursor_line=context.cursor_line,
1890 text=context.full_text,
1966 text=context.full_text,
1891 )
1967 )
1892 return {
1968 return {
1893 "completions": matches,
1969 "completions": matches,
1894 # static analysis should not suppress other matchers
1970 # static analysis should not suppress other matchers
1895 "suppress": False,
1971 "suppress": False,
1896 }
1972 }
1897
1973
1898 def _jedi_matches(
1974 def _jedi_matches(
1899 self, cursor_column: int, cursor_line: int, text: str
1975 self, cursor_column: int, cursor_line: int, text: str
1900 ) -> Iterable[_JediCompletionLike]:
1976 ) -> Iterable[_JediCompletionLike]:
1901 """
1977 """
1902 Return a list of :any:`jedi.api.Completion`s object from a ``text`` and
1978 Return a list of :any:`jedi.api.Completion`s object from a ``text`` and
1903 cursor position.
1979 cursor position.
1904
1980
1905 Parameters
1981 Parameters
1906 ----------
1982 ----------
1907 cursor_column : int
1983 cursor_column : int
1908 column position of the cursor in ``text``, 0-indexed.
1984 column position of the cursor in ``text``, 0-indexed.
1909 cursor_line : int
1985 cursor_line : int
1910 line position of the cursor in ``text``, 0-indexed
1986 line position of the cursor in ``text``, 0-indexed
1911 text : str
1987 text : str
1912 text to complete
1988 text to complete
1913
1989
1914 Notes
1990 Notes
1915 -----
1991 -----
1916 If ``IPCompleter.debug`` is ``True`` may return a :any:`_FakeJediCompletion`
1992 If ``IPCompleter.debug`` is ``True`` may return a :any:`_FakeJediCompletion`
1917 object containing a string with the Jedi debug information attached.
1993 object containing a string with the Jedi debug information attached.
1918
1994
1919 .. deprecated:: 8.6
1995 .. deprecated:: 8.6
1920 You can use :meth:`_jedi_matcher` instead.
1996 You can use :meth:`_jedi_matcher` instead.
1921 """
1997 """
1922 namespaces = [self.namespace]
1998 namespaces = [self.namespace]
1923 if self.global_namespace is not None:
1999 if self.global_namespace is not None:
1924 namespaces.append(self.global_namespace)
2000 namespaces.append(self.global_namespace)
1925
2001
1926 completion_filter = lambda x:x
2002 completion_filter = lambda x:x
1927 offset = cursor_to_position(text, cursor_line, cursor_column)
2003 offset = cursor_to_position(text, cursor_line, cursor_column)
1928 # filter output if we are completing for object members
2004 # filter output if we are completing for object members
1929 if offset:
2005 if offset:
1930 pre = text[offset-1]
2006 pre = text[offset-1]
1931 if pre == '.':
2007 if pre == '.':
1932 if self.omit__names == 2:
2008 if self.omit__names == 2:
1933 completion_filter = lambda c:not c.name.startswith('_')
2009 completion_filter = lambda c:not c.name.startswith('_')
1934 elif self.omit__names == 1:
2010 elif self.omit__names == 1:
1935 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
2011 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1936 elif self.omit__names == 0:
2012 elif self.omit__names == 0:
1937 completion_filter = lambda x:x
2013 completion_filter = lambda x:x
1938 else:
2014 else:
1939 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
2015 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1940
2016
1941 interpreter = jedi.Interpreter(text[:offset], namespaces)
2017 interpreter = jedi.Interpreter(text[:offset], namespaces)
1942 try_jedi = True
2018 try_jedi = True
1943
2019
1944 try:
2020 try:
1945 # find the first token in the current tree -- if it is a ' or " then we are in a string
2021 # find the first token in the current tree -- if it is a ' or " then we are in a string
1946 completing_string = False
2022 completing_string = False
1947 try:
2023 try:
1948 first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
2024 first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
1949 except StopIteration:
2025 except StopIteration:
1950 pass
2026 pass
1951 else:
2027 else:
1952 # note the value may be ', ", or it may also be ''' or """, or
2028 # note the value may be ', ", or it may also be ''' or """, or
1953 # in some cases, """what/you/typed..., but all of these are
2029 # in some cases, """what/you/typed..., but all of these are
1954 # strings.
2030 # strings.
1955 completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
2031 completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
1956
2032
1957 # if we are in a string jedi is likely not the right candidate for
2033 # if we are in a string jedi is likely not the right candidate for
1958 # now. Skip it.
2034 # now. Skip it.
1959 try_jedi = not completing_string
2035 try_jedi = not completing_string
1960 except Exception as e:
2036 except Exception as e:
1961 # many of things can go wrong, we are using private API just don't crash.
2037 # many of things can go wrong, we are using private API just don't crash.
1962 if self.debug:
2038 if self.debug:
1963 print("Error detecting if completing a non-finished string :", e, '|')
2039 print("Error detecting if completing a non-finished string :", e, '|')
1964
2040
1965 if not try_jedi:
2041 if not try_jedi:
1966 return []
2042 return []
1967 try:
2043 try:
1968 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
2044 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
1969 except Exception as e:
2045 except Exception as e:
1970 if self.debug:
2046 if self.debug:
1971 return [_FakeJediCompletion('Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\ns"""' % (e))]
2047 return [_FakeJediCompletion('Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\ns"""' % (e))]
1972 else:
2048 else:
1973 return []
2049 return []
1974
2050
1975 def python_matches(self, text: str) -> Iterable[str]:
2051 def python_matches(self, text: str) -> Iterable[str]:
1976 """Match attributes or global python names"""
2052 """Match attributes or global python names"""
1977 if "." in text:
2053 if "." in text:
1978 try:
2054 try:
1979 matches = self.attr_matches(text)
2055 matches = self.attr_matches(text)
1980 if text.endswith('.') and self.omit__names:
2056 if text.endswith('.') and self.omit__names:
1981 if self.omit__names == 1:
2057 if self.omit__names == 1:
1982 # true if txt is _not_ a __ name, false otherwise:
2058 # true if txt is _not_ a __ name, false otherwise:
1983 no__name = (lambda txt:
2059 no__name = (lambda txt:
1984 re.match(r'.*\.__.*?__',txt) is None)
2060 re.match(r'.*\.__.*?__',txt) is None)
1985 else:
2061 else:
1986 # true if txt is _not_ a _ name, false otherwise:
2062 # true if txt is _not_ a _ name, false otherwise:
1987 no__name = (lambda txt:
2063 no__name = (lambda txt:
1988 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
2064 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1989 matches = filter(no__name, matches)
2065 matches = filter(no__name, matches)
1990 except NameError:
2066 except NameError:
1991 # catches <undefined attributes>.<tab>
2067 # catches <undefined attributes>.<tab>
1992 matches = []
2068 matches = []
1993 else:
2069 else:
1994 matches = self.global_matches(text)
2070 matches = self.global_matches(text)
1995 return matches
2071 return matches
1996
2072
1997 def _default_arguments_from_docstring(self, doc):
2073 def _default_arguments_from_docstring(self, doc):
1998 """Parse the first line of docstring for call signature.
2074 """Parse the first line of docstring for call signature.
1999
2075
2000 Docstring should be of the form 'min(iterable[, key=func])\n'.
2076 Docstring should be of the form 'min(iterable[, key=func])\n'.
2001 It can also parse cython docstring of the form
2077 It can also parse cython docstring of the form
2002 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
2078 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
2003 """
2079 """
2004 if doc is None:
2080 if doc is None:
2005 return []
2081 return []
2006
2082
2007 #care only the firstline
2083 #care only the firstline
2008 line = doc.lstrip().splitlines()[0]
2084 line = doc.lstrip().splitlines()[0]
2009
2085
2010 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
2086 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
2011 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
2087 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
2012 sig = self.docstring_sig_re.search(line)
2088 sig = self.docstring_sig_re.search(line)
2013 if sig is None:
2089 if sig is None:
2014 return []
2090 return []
2015 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
2091 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
2016 sig = sig.groups()[0].split(',')
2092 sig = sig.groups()[0].split(',')
2017 ret = []
2093 ret = []
2018 for s in sig:
2094 for s in sig:
2019 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
2095 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
2020 ret += self.docstring_kwd_re.findall(s)
2096 ret += self.docstring_kwd_re.findall(s)
2021 return ret
2097 return ret
2022
2098
2023 def _default_arguments(self, obj):
2099 def _default_arguments(self, obj):
2024 """Return the list of default arguments of obj if it is callable,
2100 """Return the list of default arguments of obj if it is callable,
2025 or empty list otherwise."""
2101 or empty list otherwise."""
2026 call_obj = obj
2102 call_obj = obj
2027 ret = []
2103 ret = []
2028 if inspect.isbuiltin(obj):
2104 if inspect.isbuiltin(obj):
2029 pass
2105 pass
2030 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
2106 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
2031 if inspect.isclass(obj):
2107 if inspect.isclass(obj):
2032 #for cython embedsignature=True the constructor docstring
2108 #for cython embedsignature=True the constructor docstring
2033 #belongs to the object itself not __init__
2109 #belongs to the object itself not __init__
2034 ret += self._default_arguments_from_docstring(
2110 ret += self._default_arguments_from_docstring(
2035 getattr(obj, '__doc__', ''))
2111 getattr(obj, '__doc__', ''))
2036 # for classes, check for __init__,__new__
2112 # for classes, check for __init__,__new__
2037 call_obj = (getattr(obj, '__init__', None) or
2113 call_obj = (getattr(obj, '__init__', None) or
2038 getattr(obj, '__new__', None))
2114 getattr(obj, '__new__', None))
2039 # for all others, check if they are __call__able
2115 # for all others, check if they are __call__able
2040 elif hasattr(obj, '__call__'):
2116 elif hasattr(obj, '__call__'):
2041 call_obj = obj.__call__
2117 call_obj = obj.__call__
2042 ret += self._default_arguments_from_docstring(
2118 ret += self._default_arguments_from_docstring(
2043 getattr(call_obj, '__doc__', ''))
2119 getattr(call_obj, '__doc__', ''))
2044
2120
2045 _keeps = (inspect.Parameter.KEYWORD_ONLY,
2121 _keeps = (inspect.Parameter.KEYWORD_ONLY,
2046 inspect.Parameter.POSITIONAL_OR_KEYWORD)
2122 inspect.Parameter.POSITIONAL_OR_KEYWORD)
2047
2123
2048 try:
2124 try:
2049 sig = inspect.signature(obj)
2125 sig = inspect.signature(obj)
2050 ret.extend(k for k, v in sig.parameters.items() if
2126 ret.extend(k for k, v in sig.parameters.items() if
2051 v.kind in _keeps)
2127 v.kind in _keeps)
2052 except ValueError:
2128 except ValueError:
2053 pass
2129 pass
2054
2130
2055 return list(set(ret))
2131 return list(set(ret))
2056
2132
2057 @context_matcher()
2133 @context_matcher()
2058 def python_func_kw_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2134 def python_func_kw_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2059 """Match named parameters (kwargs) of the last open function."""
2135 """Match named parameters (kwargs) of the last open function."""
2060 matches = self.python_func_kw_matches(context.token)
2136 matches = self.python_func_kw_matches(context.token)
2061 return _convert_matcher_v1_result_to_v2(matches, type="param")
2137 return _convert_matcher_v1_result_to_v2(matches, type="param")
2062
2138
2063 def python_func_kw_matches(self, text):
2139 def python_func_kw_matches(self, text):
2064 """Match named parameters (kwargs) of the last open function.
2140 """Match named parameters (kwargs) of the last open function.
2065
2141
2066 .. deprecated:: 8.6
2142 .. deprecated:: 8.6
2067 You can use :meth:`python_func_kw_matcher` instead.
2143 You can use :meth:`python_func_kw_matcher` instead.
2068 """
2144 """
2069
2145
2070 if "." in text: # a parameter cannot be dotted
2146 if "." in text: # a parameter cannot be dotted
2071 return []
2147 return []
2072 try: regexp = self.__funcParamsRegex
2148 try: regexp = self.__funcParamsRegex
2073 except AttributeError:
2149 except AttributeError:
2074 regexp = self.__funcParamsRegex = re.compile(r'''
2150 regexp = self.__funcParamsRegex = re.compile(r'''
2075 '.*?(?<!\\)' | # single quoted strings or
2151 '.*?(?<!\\)' | # single quoted strings or
2076 ".*?(?<!\\)" | # double quoted strings or
2152 ".*?(?<!\\)" | # double quoted strings or
2077 \w+ | # identifier
2153 \w+ | # identifier
2078 \S # other characters
2154 \S # other characters
2079 ''', re.VERBOSE | re.DOTALL)
2155 ''', re.VERBOSE | re.DOTALL)
2080 # 1. find the nearest identifier that comes before an unclosed
2156 # 1. find the nearest identifier that comes before an unclosed
2081 # parenthesis before the cursor
2157 # parenthesis before the cursor
2082 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
2158 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
2083 tokens = regexp.findall(self.text_until_cursor)
2159 tokens = regexp.findall(self.text_until_cursor)
2084 iterTokens = reversed(tokens); openPar = 0
2160 iterTokens = reversed(tokens); openPar = 0
2085
2161
2086 for token in iterTokens:
2162 for token in iterTokens:
2087 if token == ')':
2163 if token == ')':
2088 openPar -= 1
2164 openPar -= 1
2089 elif token == '(':
2165 elif token == '(':
2090 openPar += 1
2166 openPar += 1
2091 if openPar > 0:
2167 if openPar > 0:
2092 # found the last unclosed parenthesis
2168 # found the last unclosed parenthesis
2093 break
2169 break
2094 else:
2170 else:
2095 return []
2171 return []
2096 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
2172 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
2097 ids = []
2173 ids = []
2098 isId = re.compile(r'\w+$').match
2174 isId = re.compile(r'\w+$').match
2099
2175
2100 while True:
2176 while True:
2101 try:
2177 try:
2102 ids.append(next(iterTokens))
2178 ids.append(next(iterTokens))
2103 if not isId(ids[-1]):
2179 if not isId(ids[-1]):
2104 ids.pop(); break
2180 ids.pop(); break
2105 if not next(iterTokens) == '.':
2181 if not next(iterTokens) == '.':
2106 break
2182 break
2107 except StopIteration:
2183 except StopIteration:
2108 break
2184 break
2109
2185
2110 # Find all named arguments already assigned to, as to avoid suggesting
2186 # Find all named arguments already assigned to, as to avoid suggesting
2111 # them again
2187 # them again
2112 usedNamedArgs = set()
2188 usedNamedArgs = set()
2113 par_level = -1
2189 par_level = -1
2114 for token, next_token in zip(tokens, tokens[1:]):
2190 for token, next_token in zip(tokens, tokens[1:]):
2115 if token == '(':
2191 if token == '(':
2116 par_level += 1
2192 par_level += 1
2117 elif token == ')':
2193 elif token == ')':
2118 par_level -= 1
2194 par_level -= 1
2119
2195
2120 if par_level != 0:
2196 if par_level != 0:
2121 continue
2197 continue
2122
2198
2123 if next_token != '=':
2199 if next_token != '=':
2124 continue
2200 continue
2125
2201
2126 usedNamedArgs.add(token)
2202 usedNamedArgs.add(token)
2127
2203
2128 argMatches = []
2204 argMatches = []
2129 try:
2205 try:
2130 callableObj = '.'.join(ids[::-1])
2206 callableObj = '.'.join(ids[::-1])
2131 namedArgs = self._default_arguments(eval(callableObj,
2207 namedArgs = self._default_arguments(eval(callableObj,
2132 self.namespace))
2208 self.namespace))
2133
2209
2134 # Remove used named arguments from the list, no need to show twice
2210 # Remove used named arguments from the list, no need to show twice
2135 for namedArg in set(namedArgs) - usedNamedArgs:
2211 for namedArg in set(namedArgs) - usedNamedArgs:
2136 if namedArg.startswith(text):
2212 if namedArg.startswith(text):
2137 argMatches.append("%s=" %namedArg)
2213 argMatches.append("%s=" %namedArg)
2138 except:
2214 except:
2139 pass
2215 pass
2140
2216
2141 return argMatches
2217 return argMatches
2142
2218
2143 @staticmethod
2219 @staticmethod
2144 def _get_keys(obj: Any) -> List[Any]:
2220 def _get_keys(obj: Any) -> List[Any]:
2145 # Objects can define their own completions by defining an
2221 # Objects can define their own completions by defining an
2146 # _ipy_key_completions_() method.
2222 # _ipy_key_completions_() method.
2147 method = get_real_method(obj, '_ipython_key_completions_')
2223 method = get_real_method(obj, '_ipython_key_completions_')
2148 if method is not None:
2224 if method is not None:
2149 return method()
2225 return method()
2150
2226
2151 # Special case some common in-memory dict-like types
2227 # Special case some common in-memory dict-like types
2152 if isinstance(obj, dict) or\
2228 if (isinstance(obj, dict) or
2153 _safe_isinstance(obj, 'pandas', 'DataFrame'):
2229 _safe_isinstance(obj, 'pandas', 'DataFrame')):
2154 try:
2230 try:
2155 return list(obj.keys())
2231 return list(obj.keys())
2156 except Exception:
2232 except Exception:
2157 return []
2233 return []
2234 elif _safe_isinstance(obj, 'pandas', 'core', 'indexing', '_LocIndexer'):
2235 try:
2236 return list(obj.obj.keys())
2237 except Exception:
2238 return []
2158 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
2239 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
2159 _safe_isinstance(obj, 'numpy', 'void'):
2240 _safe_isinstance(obj, 'numpy', 'void'):
2160 return obj.dtype.names or []
2241 return obj.dtype.names or []
2161 return []
2242 return []
2162
2243
2163 @context_matcher()
2244 @context_matcher()
2164 def dict_key_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2245 def dict_key_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2165 """Match string keys in a dictionary, after e.g. ``foo[``."""
2246 """Match string keys in a dictionary, after e.g. ``foo[``."""
2166 matches = self.dict_key_matches(context.token)
2247 matches = self.dict_key_matches(context.token)
2167 return _convert_matcher_v1_result_to_v2(
2248 return _convert_matcher_v1_result_to_v2(
2168 matches, type="dict key", suppress_if_matches=True
2249 matches, type="dict key", suppress_if_matches=True
2169 )
2250 )
2170
2251
2171 def dict_key_matches(self, text: str) -> List[str]:
2252 def dict_key_matches(self, text: str) -> List[str]:
2172 """Match string keys in a dictionary, after e.g. ``foo[``.
2253 """Match string keys in a dictionary, after e.g. ``foo[``.
2173
2254
2174 .. deprecated:: 8.6
2255 .. deprecated:: 8.6
2175 You can use :meth:`dict_key_matcher` instead.
2256 You can use :meth:`dict_key_matcher` instead.
2176 """
2257 """
2177
2258
2178 if self.__dict_key_regexps is not None:
2259 # Short-circuit on closed dictionary (regular expression would
2179 regexps = self.__dict_key_regexps
2260 # not match anyway, but would take quite a while).
2180 else:
2261 if self.text_until_cursor.strip().endswith(']'):
2181 dict_key_re_fmt = r'''(?x)
2262 return []
2182 ( # match dict-referring expression wrt greedy setting
2183 %s
2184 )
2185 \[ # open bracket
2186 \s* # and optional whitespace
2187 # Capture any number of str-like objects (e.g. "a", "b", 'c')
2188 ((?:[uUbB]? # string prefix (r not handled)
2189 (?:
2190 '(?:[^']|(?<!\\)\\')*'
2191 |
2192 "(?:[^"]|(?<!\\)\\")*"
2193 )
2194 \s*,\s*
2195 )*)
2196 ([uUbB]? # string prefix (r not handled)
2197 (?: # unclosed string
2198 '(?:[^']|(?<!\\)\\')*
2199 |
2200 "(?:[^"]|(?<!\\)\\")*
2201 )
2202 )?
2203 $
2204 '''
2205 regexps = self.__dict_key_regexps = {
2206 False: re.compile(dict_key_re_fmt % r'''
2207 # identifiers separated by .
2208 (?!\d)\w+
2209 (?:\.(?!\d)\w+)*
2210 '''),
2211 True: re.compile(dict_key_re_fmt % '''
2212 .+
2213 ''')
2214 }
2215
2263
2216 match = regexps[self.greedy].search(self.text_until_cursor)
2264 match = DICT_MATCHER_REGEX.search(self.text_until_cursor)
2217
2265
2218 if match is None:
2266 if match is None:
2219 return []
2267 return []
2220
2268
2221 expr, prefix0, prefix = match.groups()
2269 expr, prior_tuple_keys, key_prefix = match.groups()
2222 try:
2270
2223 obj = eval(expr, self.namespace)
2271 obj = self._evaluate_expr(expr)
2224 except Exception:
2272
2225 try:
2273 if obj is not_found:
2226 obj = eval(expr, self.global_namespace)
2274 return []
2227 except Exception:
2228 return []
2229
2275
2230 keys = self._get_keys(obj)
2276 keys = self._get_keys(obj)
2231 if not keys:
2277 if not keys:
2232 return keys
2278 return keys
2233
2279
2234 extra_prefix = eval(prefix0) if prefix0 != '' else None
2280 tuple_prefix = guarded_eval(
2281 prior_tuple_keys,
2282 EvaluationContext(
2283 globals_=self.global_namespace,
2284 locals_=self.namespace,
2285 evaluation=self.evaluation,
2286 in_subscript=True
2287 )
2288 )
2235
2289
2236 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims, extra_prefix=extra_prefix)
2290 closing_quote, token_offset, matches = match_dict_keys(
2291 keys,
2292 key_prefix,
2293 self.splitter.delims,
2294 extra_prefix=tuple_prefix
2295 )
2237 if not matches:
2296 if not matches:
2238 return matches
2297 return matches
2239
2298
2240 # get the cursor position of
2299 # get the cursor position of
2241 # - the text being completed
2300 # - the text being completed
2242 # - the start of the key text
2301 # - the start of the key text
2243 # - the start of the completion
2302 # - the start of the completion
2244 text_start = len(self.text_until_cursor) - len(text)
2303 text_start = len(self.text_until_cursor) - len(text)
2245 if prefix:
2304 if key_prefix:
2246 key_start = match.start(3)
2305 key_start = match.start(3)
2247 completion_start = key_start + token_offset
2306 completion_start = key_start + token_offset
2248 else:
2307 else:
2249 key_start = completion_start = match.end()
2308 key_start = completion_start = match.end()
2250
2309
2251 # grab the leading prefix, to make sure all completions start with `text`
2310 # grab the leading prefix, to make sure all completions start with `text`
2252 if text_start > key_start:
2311 if text_start > key_start:
2253 leading = ''
2312 leading = ''
2254 else:
2313 else:
2255 leading = text[text_start:completion_start]
2314 leading = text[text_start:completion_start]
2256
2315
2257 # the index of the `[` character
2316 # the index of the `[` character
2258 bracket_idx = match.end(1)
2317 bracket_idx = match.end(1)
2259
2318
2260 # append closing quote and bracket as appropriate
2319 # append closing quote and bracket as appropriate
2261 # this is *not* appropriate if the opening quote or bracket is outside
2320 # this is *not* appropriate if the opening quote or bracket is outside
2262 # the text given to this method
2321 # the text given to this method
2263 suf = ''
2322 suf = ''
2264 continuation = self.line_buffer[len(self.text_until_cursor):]
2323 continuation = self.line_buffer[len(self.text_until_cursor):]
2265 if key_start > text_start and closing_quote:
2324 if key_start > text_start and closing_quote:
2266 # quotes were opened inside text, maybe close them
2325 # quotes were opened inside text, maybe close them
2267 if continuation.startswith(closing_quote):
2326 if continuation.startswith(closing_quote):
2268 continuation = continuation[len(closing_quote):]
2327 continuation = continuation[len(closing_quote):]
2269 else:
2328 else:
2270 suf += closing_quote
2329 suf += closing_quote
2271 if bracket_idx > text_start:
2330 if bracket_idx > text_start:
2272 # brackets were opened inside text, maybe close them
2331 # brackets were opened inside text, maybe close them
2273 if not continuation.startswith(']'):
2332 if not continuation.startswith(']'):
2274 suf += ']'
2333 suf += ']'
2275
2334
2276 return [leading + k + suf for k in matches]
2335 return [leading + k + suf for k in matches]
2277
2336
2278 @context_matcher()
2337 @context_matcher()
2279 def unicode_name_matcher(self, context: CompletionContext):
2338 def unicode_name_matcher(self, context: CompletionContext):
2280 """Same as :any:`unicode_name_matches`, but adopted to new Matcher API."""
2339 """Same as :any:`unicode_name_matches`, but adopted to new Matcher API."""
2281 fragment, matches = self.unicode_name_matches(context.text_until_cursor)
2340 fragment, matches = self.unicode_name_matches(context.text_until_cursor)
2282 return _convert_matcher_v1_result_to_v2(
2341 return _convert_matcher_v1_result_to_v2(
2283 matches, type="unicode", fragment=fragment, suppress_if_matches=True
2342 matches, type="unicode", fragment=fragment, suppress_if_matches=True
2284 )
2343 )
2285
2344
2286 @staticmethod
2345 @staticmethod
2287 def unicode_name_matches(text: str) -> Tuple[str, List[str]]:
2346 def unicode_name_matches(text: str) -> Tuple[str, List[str]]:
2288 """Match Latex-like syntax for unicode characters base
2347 """Match Latex-like syntax for unicode characters base
2289 on the name of the character.
2348 on the name of the character.
2290
2349
2291 This does ``\\GREEK SMALL LETTER ETA`` -> ``Ξ·``
2350 This does ``\\GREEK SMALL LETTER ETA`` -> ``Ξ·``
2292
2351
2293 Works only on valid python 3 identifier, or on combining characters that
2352 Works only on valid python 3 identifier, or on combining characters that
2294 will combine to form a valid identifier.
2353 will combine to form a valid identifier.
2295 """
2354 """
2296 slashpos = text.rfind('\\')
2355 slashpos = text.rfind('\\')
2297 if slashpos > -1:
2356 if slashpos > -1:
2298 s = text[slashpos+1:]
2357 s = text[slashpos+1:]
2299 try :
2358 try :
2300 unic = unicodedata.lookup(s)
2359 unic = unicodedata.lookup(s)
2301 # allow combining chars
2360 # allow combining chars
2302 if ('a'+unic).isidentifier():
2361 if ('a'+unic).isidentifier():
2303 return '\\'+s,[unic]
2362 return '\\'+s,[unic]
2304 except KeyError:
2363 except KeyError:
2305 pass
2364 pass
2306 return '', []
2365 return '', []
2307
2366
2308 @context_matcher()
2367 @context_matcher()
2309 def latex_name_matcher(self, context: CompletionContext):
2368 def latex_name_matcher(self, context: CompletionContext):
2310 """Match Latex syntax for unicode characters.
2369 """Match Latex syntax for unicode characters.
2311
2370
2312 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
2371 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
2313 """
2372 """
2314 fragment, matches = self.latex_matches(context.text_until_cursor)
2373 fragment, matches = self.latex_matches(context.text_until_cursor)
2315 return _convert_matcher_v1_result_to_v2(
2374 return _convert_matcher_v1_result_to_v2(
2316 matches, type="latex", fragment=fragment, suppress_if_matches=True
2375 matches, type="latex", fragment=fragment, suppress_if_matches=True
2317 )
2376 )
2318
2377
2319 def latex_matches(self, text: str) -> Tuple[str, Sequence[str]]:
2378 def latex_matches(self, text: str) -> Tuple[str, Sequence[str]]:
2320 """Match Latex syntax for unicode characters.
2379 """Match Latex syntax for unicode characters.
2321
2380
2322 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
2381 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
2323
2382
2324 .. deprecated:: 8.6
2383 .. deprecated:: 8.6
2325 You can use :meth:`latex_name_matcher` instead.
2384 You can use :meth:`latex_name_matcher` instead.
2326 """
2385 """
2327 slashpos = text.rfind('\\')
2386 slashpos = text.rfind('\\')
2328 if slashpos > -1:
2387 if slashpos > -1:
2329 s = text[slashpos:]
2388 s = text[slashpos:]
2330 if s in latex_symbols:
2389 if s in latex_symbols:
2331 # Try to complete a full latex symbol to unicode
2390 # Try to complete a full latex symbol to unicode
2332 # \\alpha -> Ξ±
2391 # \\alpha -> Ξ±
2333 return s, [latex_symbols[s]]
2392 return s, [latex_symbols[s]]
2334 else:
2393 else:
2335 # If a user has partially typed a latex symbol, give them
2394 # If a user has partially typed a latex symbol, give them
2336 # a full list of options \al -> [\aleph, \alpha]
2395 # a full list of options \al -> [\aleph, \alpha]
2337 matches = [k for k in latex_symbols if k.startswith(s)]
2396 matches = [k for k in latex_symbols if k.startswith(s)]
2338 if matches:
2397 if matches:
2339 return s, matches
2398 return s, matches
2340 return '', ()
2399 return '', ()
2341
2400
2342 @context_matcher()
2401 @context_matcher()
2343 def custom_completer_matcher(self, context):
2402 def custom_completer_matcher(self, context):
2344 """Dispatch custom completer.
2403 """Dispatch custom completer.
2345
2404
2346 If a match is found, suppresses all other matchers except for Jedi.
2405 If a match is found, suppresses all other matchers except for Jedi.
2347 """
2406 """
2348 matches = self.dispatch_custom_completer(context.token) or []
2407 matches = self.dispatch_custom_completer(context.token) or []
2349 result = _convert_matcher_v1_result_to_v2(
2408 result = _convert_matcher_v1_result_to_v2(
2350 matches, type=_UNKNOWN_TYPE, suppress_if_matches=True
2409 matches, type=_UNKNOWN_TYPE, suppress_if_matches=True
2351 )
2410 )
2352 result["ordered"] = True
2411 result["ordered"] = True
2353 result["do_not_suppress"] = {_get_matcher_id(self._jedi_matcher)}
2412 result["do_not_suppress"] = {_get_matcher_id(self._jedi_matcher)}
2354 return result
2413 return result
2355
2414
2356 def dispatch_custom_completer(self, text):
2415 def dispatch_custom_completer(self, text):
2357 """
2416 """
2358 .. deprecated:: 8.6
2417 .. deprecated:: 8.6
2359 You can use :meth:`custom_completer_matcher` instead.
2418 You can use :meth:`custom_completer_matcher` instead.
2360 """
2419 """
2361 if not self.custom_completers:
2420 if not self.custom_completers:
2362 return
2421 return
2363
2422
2364 line = self.line_buffer
2423 line = self.line_buffer
2365 if not line.strip():
2424 if not line.strip():
2366 return None
2425 return None
2367
2426
2368 # Create a little structure to pass all the relevant information about
2427 # Create a little structure to pass all the relevant information about
2369 # the current completion to any custom completer.
2428 # the current completion to any custom completer.
2370 event = SimpleNamespace()
2429 event = SimpleNamespace()
2371 event.line = line
2430 event.line = line
2372 event.symbol = text
2431 event.symbol = text
2373 cmd = line.split(None,1)[0]
2432 cmd = line.split(None,1)[0]
2374 event.command = cmd
2433 event.command = cmd
2375 event.text_until_cursor = self.text_until_cursor
2434 event.text_until_cursor = self.text_until_cursor
2376
2435
2377 # for foo etc, try also to find completer for %foo
2436 # for foo etc, try also to find completer for %foo
2378 if not cmd.startswith(self.magic_escape):
2437 if not cmd.startswith(self.magic_escape):
2379 try_magic = self.custom_completers.s_matches(
2438 try_magic = self.custom_completers.s_matches(
2380 self.magic_escape + cmd)
2439 self.magic_escape + cmd)
2381 else:
2440 else:
2382 try_magic = []
2441 try_magic = []
2383
2442
2384 for c in itertools.chain(self.custom_completers.s_matches(cmd),
2443 for c in itertools.chain(self.custom_completers.s_matches(cmd),
2385 try_magic,
2444 try_magic,
2386 self.custom_completers.flat_matches(self.text_until_cursor)):
2445 self.custom_completers.flat_matches(self.text_until_cursor)):
2387 try:
2446 try:
2388 res = c(event)
2447 res = c(event)
2389 if res:
2448 if res:
2390 # first, try case sensitive match
2449 # first, try case sensitive match
2391 withcase = [r for r in res if r.startswith(text)]
2450 withcase = [r for r in res if r.startswith(text)]
2392 if withcase:
2451 if withcase:
2393 return withcase
2452 return withcase
2394 # if none, then case insensitive ones are ok too
2453 # if none, then case insensitive ones are ok too
2395 text_low = text.lower()
2454 text_low = text.lower()
2396 return [r for r in res if r.lower().startswith(text_low)]
2455 return [r for r in res if r.lower().startswith(text_low)]
2397 except TryNext:
2456 except TryNext:
2398 pass
2457 pass
2399 except KeyboardInterrupt:
2458 except KeyboardInterrupt:
2400 """
2459 """
2401 If custom completer take too long,
2460 If custom completer take too long,
2402 let keyboard interrupt abort and return nothing.
2461 let keyboard interrupt abort and return nothing.
2403 """
2462 """
2404 break
2463 break
2405
2464
2406 return None
2465 return None
2407
2466
2408 def completions(self, text: str, offset: int)->Iterator[Completion]:
2467 def completions(self, text: str, offset: int)->Iterator[Completion]:
2409 """
2468 """
2410 Returns an iterator over the possible completions
2469 Returns an iterator over the possible completions
2411
2470
2412 .. warning::
2471 .. warning::
2413
2472
2414 Unstable
2473 Unstable
2415
2474
2416 This function is unstable, API may change without warning.
2475 This function is unstable, API may change without warning.
2417 It will also raise unless use in proper context manager.
2476 It will also raise unless use in proper context manager.
2418
2477
2419 Parameters
2478 Parameters
2420 ----------
2479 ----------
2421 text : str
2480 text : str
2422 Full text of the current input, multi line string.
2481 Full text of the current input, multi line string.
2423 offset : int
2482 offset : int
2424 Integer representing the position of the cursor in ``text``. Offset
2483 Integer representing the position of the cursor in ``text``. Offset
2425 is 0-based indexed.
2484 is 0-based indexed.
2426
2485
2427 Yields
2486 Yields
2428 ------
2487 ------
2429 Completion
2488 Completion
2430
2489
2431 Notes
2490 Notes
2432 -----
2491 -----
2433 The cursor on a text can either be seen as being "in between"
2492 The cursor on a text can either be seen as being "in between"
2434 characters or "On" a character depending on the interface visible to
2493 characters or "On" a character depending on the interface visible to
2435 the user. For consistency the cursor being on "in between" characters X
2494 the user. For consistency the cursor being on "in between" characters X
2436 and Y is equivalent to the cursor being "on" character Y, that is to say
2495 and Y is equivalent to the cursor being "on" character Y, that is to say
2437 the character the cursor is on is considered as being after the cursor.
2496 the character the cursor is on is considered as being after the cursor.
2438
2497
2439 Combining characters may span more that one position in the
2498 Combining characters may span more that one position in the
2440 text.
2499 text.
2441
2500
2442 .. note::
2501 .. note::
2443
2502
2444 If ``IPCompleter.debug`` is :any:`True` will yield a ``--jedi/ipython--``
2503 If ``IPCompleter.debug`` is :any:`True` will yield a ``--jedi/ipython--``
2445 fake Completion token to distinguish completion returned by Jedi
2504 fake Completion token to distinguish completion returned by Jedi
2446 and usual IPython completion.
2505 and usual IPython completion.
2447
2506
2448 .. note::
2507 .. note::
2449
2508
2450 Completions are not completely deduplicated yet. If identical
2509 Completions are not completely deduplicated yet. If identical
2451 completions are coming from different sources this function does not
2510 completions are coming from different sources this function does not
2452 ensure that each completion object will only be present once.
2511 ensure that each completion object will only be present once.
2453 """
2512 """
2454 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
2513 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
2455 "It may change without warnings. "
2514 "It may change without warnings. "
2456 "Use in corresponding context manager.",
2515 "Use in corresponding context manager.",
2457 category=ProvisionalCompleterWarning, stacklevel=2)
2516 category=ProvisionalCompleterWarning, stacklevel=2)
2458
2517
2459 seen = set()
2518 seen = set()
2460 profiler:Optional[cProfile.Profile]
2519 profiler:Optional[cProfile.Profile]
2461 try:
2520 try:
2462 if self.profile_completions:
2521 if self.profile_completions:
2463 import cProfile
2522 import cProfile
2464 profiler = cProfile.Profile()
2523 profiler = cProfile.Profile()
2465 profiler.enable()
2524 profiler.enable()
2466 else:
2525 else:
2467 profiler = None
2526 profiler = None
2468
2527
2469 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
2528 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
2470 if c and (c in seen):
2529 if c and (c in seen):
2471 continue
2530 continue
2472 yield c
2531 yield c
2473 seen.add(c)
2532 seen.add(c)
2474 except KeyboardInterrupt:
2533 except KeyboardInterrupt:
2475 """if completions take too long and users send keyboard interrupt,
2534 """if completions take too long and users send keyboard interrupt,
2476 do not crash and return ASAP. """
2535 do not crash and return ASAP. """
2477 pass
2536 pass
2478 finally:
2537 finally:
2479 if profiler is not None:
2538 if profiler is not None:
2480 profiler.disable()
2539 profiler.disable()
2481 ensure_dir_exists(self.profiler_output_dir)
2540 ensure_dir_exists(self.profiler_output_dir)
2482 output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
2541 output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
2483 print("Writing profiler output to", output_path)
2542 print("Writing profiler output to", output_path)
2484 profiler.dump_stats(output_path)
2543 profiler.dump_stats(output_path)
2485
2544
2486 def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
2545 def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
2487 """
2546 """
2488 Core completion module.Same signature as :any:`completions`, with the
2547 Core completion module.Same signature as :any:`completions`, with the
2489 extra `timeout` parameter (in seconds).
2548 extra `timeout` parameter (in seconds).
2490
2549
2491 Computing jedi's completion ``.type`` can be quite expensive (it is a
2550 Computing jedi's completion ``.type`` can be quite expensive (it is a
2492 lazy property) and can require some warm-up, more warm up than just
2551 lazy property) and can require some warm-up, more warm up than just
2493 computing the ``name`` of a completion. The warm-up can be :
2552 computing the ``name`` of a completion. The warm-up can be :
2494
2553
2495 - Long warm-up the first time a module is encountered after
2554 - Long warm-up the first time a module is encountered after
2496 install/update: actually build parse/inference tree.
2555 install/update: actually build parse/inference tree.
2497
2556
2498 - first time the module is encountered in a session: load tree from
2557 - first time the module is encountered in a session: load tree from
2499 disk.
2558 disk.
2500
2559
2501 We don't want to block completions for tens of seconds so we give the
2560 We don't want to block completions for tens of seconds so we give the
2502 completer a "budget" of ``_timeout`` seconds per invocation to compute
2561 completer a "budget" of ``_timeout`` seconds per invocation to compute
2503 completions types, the completions that have not yet been computed will
2562 completions types, the completions that have not yet been computed will
2504 be marked as "unknown" an will have a chance to be computed next round
2563 be marked as "unknown" an will have a chance to be computed next round
2505 are things get cached.
2564 are things get cached.
2506
2565
2507 Keep in mind that Jedi is not the only thing treating the completion so
2566 Keep in mind that Jedi is not the only thing treating the completion so
2508 keep the timeout short-ish as if we take more than 0.3 second we still
2567 keep the timeout short-ish as if we take more than 0.3 second we still
2509 have lots of processing to do.
2568 have lots of processing to do.
2510
2569
2511 """
2570 """
2512 deadline = time.monotonic() + _timeout
2571 deadline = time.monotonic() + _timeout
2513
2572
2514 before = full_text[:offset]
2573 before = full_text[:offset]
2515 cursor_line, cursor_column = position_to_cursor(full_text, offset)
2574 cursor_line, cursor_column = position_to_cursor(full_text, offset)
2516
2575
2517 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2576 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2518
2577
2519 results = self._complete(
2578 results = self._complete(
2520 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column
2579 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column
2521 )
2580 )
2522 non_jedi_results: Dict[str, SimpleMatcherResult] = {
2581 non_jedi_results: Dict[str, SimpleMatcherResult] = {
2523 identifier: result
2582 identifier: result
2524 for identifier, result in results.items()
2583 for identifier, result in results.items()
2525 if identifier != jedi_matcher_id
2584 if identifier != jedi_matcher_id
2526 }
2585 }
2527
2586
2528 jedi_matches = (
2587 jedi_matches = (
2529 cast(results[jedi_matcher_id], _JediMatcherResult)["completions"]
2588 cast(results[jedi_matcher_id], _JediMatcherResult)["completions"]
2530 if jedi_matcher_id in results
2589 if jedi_matcher_id in results
2531 else ()
2590 else ()
2532 )
2591 )
2533
2592
2534 iter_jm = iter(jedi_matches)
2593 iter_jm = iter(jedi_matches)
2535 if _timeout:
2594 if _timeout:
2536 for jm in iter_jm:
2595 for jm in iter_jm:
2537 try:
2596 try:
2538 type_ = jm.type
2597 type_ = jm.type
2539 except Exception:
2598 except Exception:
2540 if self.debug:
2599 if self.debug:
2541 print("Error in Jedi getting type of ", jm)
2600 print("Error in Jedi getting type of ", jm)
2542 type_ = None
2601 type_ = None
2543 delta = len(jm.name_with_symbols) - len(jm.complete)
2602 delta = len(jm.name_with_symbols) - len(jm.complete)
2544 if type_ == 'function':
2603 if type_ == 'function':
2545 signature = _make_signature(jm)
2604 signature = _make_signature(jm)
2546 else:
2605 else:
2547 signature = ''
2606 signature = ''
2548 yield Completion(start=offset - delta,
2607 yield Completion(start=offset - delta,
2549 end=offset,
2608 end=offset,
2550 text=jm.name_with_symbols,
2609 text=jm.name_with_symbols,
2551 type=type_,
2610 type=type_,
2552 signature=signature,
2611 signature=signature,
2553 _origin='jedi')
2612 _origin='jedi')
2554
2613
2555 if time.monotonic() > deadline:
2614 if time.monotonic() > deadline:
2556 break
2615 break
2557
2616
2558 for jm in iter_jm:
2617 for jm in iter_jm:
2559 delta = len(jm.name_with_symbols) - len(jm.complete)
2618 delta = len(jm.name_with_symbols) - len(jm.complete)
2560 yield Completion(
2619 yield Completion(
2561 start=offset - delta,
2620 start=offset - delta,
2562 end=offset,
2621 end=offset,
2563 text=jm.name_with_symbols,
2622 text=jm.name_with_symbols,
2564 type=_UNKNOWN_TYPE, # don't compute type for speed
2623 type=_UNKNOWN_TYPE, # don't compute type for speed
2565 _origin="jedi",
2624 _origin="jedi",
2566 signature="",
2625 signature="",
2567 )
2626 )
2568
2627
2569 # TODO:
2628 # TODO:
2570 # Suppress this, right now just for debug.
2629 # Suppress this, right now just for debug.
2571 if jedi_matches and non_jedi_results and self.debug:
2630 if jedi_matches and non_jedi_results and self.debug:
2572 some_start_offset = before.rfind(
2631 some_start_offset = before.rfind(
2573 next(iter(non_jedi_results.values()))["matched_fragment"]
2632 next(iter(non_jedi_results.values()))["matched_fragment"]
2574 )
2633 )
2575 yield Completion(
2634 yield Completion(
2576 start=some_start_offset,
2635 start=some_start_offset,
2577 end=offset,
2636 end=offset,
2578 text="--jedi/ipython--",
2637 text="--jedi/ipython--",
2579 _origin="debug",
2638 _origin="debug",
2580 type="none",
2639 type="none",
2581 signature="",
2640 signature="",
2582 )
2641 )
2583
2642
2584 ordered = []
2643 ordered = []
2585 sortable = []
2644 sortable = []
2586
2645
2587 for origin, result in non_jedi_results.items():
2646 for origin, result in non_jedi_results.items():
2588 matched_text = result["matched_fragment"]
2647 matched_text = result["matched_fragment"]
2589 start_offset = before.rfind(matched_text)
2648 start_offset = before.rfind(matched_text)
2590 is_ordered = result.get("ordered", False)
2649 is_ordered = result.get("ordered", False)
2591 container = ordered if is_ordered else sortable
2650 container = ordered if is_ordered else sortable
2592
2651
2593 # I'm unsure if this is always true, so let's assert and see if it
2652 # I'm unsure if this is always true, so let's assert and see if it
2594 # crash
2653 # crash
2595 assert before.endswith(matched_text)
2654 assert before.endswith(matched_text)
2596
2655
2597 for simple_completion in result["completions"]:
2656 for simple_completion in result["completions"]:
2598 completion = Completion(
2657 completion = Completion(
2599 start=start_offset,
2658 start=start_offset,
2600 end=offset,
2659 end=offset,
2601 text=simple_completion.text,
2660 text=simple_completion.text,
2602 _origin=origin,
2661 _origin=origin,
2603 signature="",
2662 signature="",
2604 type=simple_completion.type or _UNKNOWN_TYPE,
2663 type=simple_completion.type or _UNKNOWN_TYPE,
2605 )
2664 )
2606 container.append(completion)
2665 container.append(completion)
2607
2666
2608 yield from list(self._deduplicate(ordered + self._sort(sortable)))[
2667 yield from list(self._deduplicate(ordered + self._sort(sortable)))[
2609 :MATCHES_LIMIT
2668 :MATCHES_LIMIT
2610 ]
2669 ]
2611
2670
2612 def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
2671 def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
2613 """Find completions for the given text and line context.
2672 """Find completions for the given text and line context.
2614
2673
2615 Note that both the text and the line_buffer are optional, but at least
2674 Note that both the text and the line_buffer are optional, but at least
2616 one of them must be given.
2675 one of them must be given.
2617
2676
2618 Parameters
2677 Parameters
2619 ----------
2678 ----------
2620 text : string, optional
2679 text : string, optional
2621 Text to perform the completion on. If not given, the line buffer
2680 Text to perform the completion on. If not given, the line buffer
2622 is split using the instance's CompletionSplitter object.
2681 is split using the instance's CompletionSplitter object.
2623 line_buffer : string, optional
2682 line_buffer : string, optional
2624 If not given, the completer attempts to obtain the current line
2683 If not given, the completer attempts to obtain the current line
2625 buffer via readline. This keyword allows clients which are
2684 buffer via readline. This keyword allows clients which are
2626 requesting for text completions in non-readline contexts to inform
2685 requesting for text completions in non-readline contexts to inform
2627 the completer of the entire text.
2686 the completer of the entire text.
2628 cursor_pos : int, optional
2687 cursor_pos : int, optional
2629 Index of the cursor in the full line buffer. Should be provided by
2688 Index of the cursor in the full line buffer. Should be provided by
2630 remote frontends where kernel has no access to frontend state.
2689 remote frontends where kernel has no access to frontend state.
2631
2690
2632 Returns
2691 Returns
2633 -------
2692 -------
2634 Tuple of two items:
2693 Tuple of two items:
2635 text : str
2694 text : str
2636 Text that was actually used in the completion.
2695 Text that was actually used in the completion.
2637 matches : list
2696 matches : list
2638 A list of completion matches.
2697 A list of completion matches.
2639
2698
2640 Notes
2699 Notes
2641 -----
2700 -----
2642 This API is likely to be deprecated and replaced by
2701 This API is likely to be deprecated and replaced by
2643 :any:`IPCompleter.completions` in the future.
2702 :any:`IPCompleter.completions` in the future.
2644
2703
2645 """
2704 """
2646 warnings.warn('`Completer.complete` is pending deprecation since '
2705 warnings.warn('`Completer.complete` is pending deprecation since '
2647 'IPython 6.0 and will be replaced by `Completer.completions`.',
2706 'IPython 6.0 and will be replaced by `Completer.completions`.',
2648 PendingDeprecationWarning)
2707 PendingDeprecationWarning)
2649 # potential todo, FOLD the 3rd throw away argument of _complete
2708 # potential todo, FOLD the 3rd throw away argument of _complete
2650 # into the first 2 one.
2709 # into the first 2 one.
2651 # TODO: Q: does the above refer to jedi completions (i.e. 0-indexed?)
2710 # TODO: Q: does the above refer to jedi completions (i.e. 0-indexed?)
2652 # TODO: should we deprecate now, or does it stay?
2711 # TODO: should we deprecate now, or does it stay?
2653
2712
2654 results = self._complete(
2713 results = self._complete(
2655 line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0
2714 line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0
2656 )
2715 )
2657
2716
2658 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2717 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2659
2718
2660 return self._arrange_and_extract(
2719 return self._arrange_and_extract(
2661 results,
2720 results,
2662 # TODO: can we confirm that excluding Jedi here was a deliberate choice in previous version?
2721 # TODO: can we confirm that excluding Jedi here was a deliberate choice in previous version?
2663 skip_matchers={jedi_matcher_id},
2722 skip_matchers={jedi_matcher_id},
2664 # this API does not support different start/end positions (fragments of token).
2723 # this API does not support different start/end positions (fragments of token).
2665 abort_if_offset_changes=True,
2724 abort_if_offset_changes=True,
2666 )
2725 )
2667
2726
2668 def _arrange_and_extract(
2727 def _arrange_and_extract(
2669 self,
2728 self,
2670 results: Dict[str, MatcherResult],
2729 results: Dict[str, MatcherResult],
2671 skip_matchers: Set[str],
2730 skip_matchers: Set[str],
2672 abort_if_offset_changes: bool,
2731 abort_if_offset_changes: bool,
2673 ):
2732 ):
2674
2733
2675 sortable = []
2734 sortable = []
2676 ordered = []
2735 ordered = []
2677 most_recent_fragment = None
2736 most_recent_fragment = None
2678 for identifier, result in results.items():
2737 for identifier, result in results.items():
2679 if identifier in skip_matchers:
2738 if identifier in skip_matchers:
2680 continue
2739 continue
2681 if not result["completions"]:
2740 if not result["completions"]:
2682 continue
2741 continue
2683 if not most_recent_fragment:
2742 if not most_recent_fragment:
2684 most_recent_fragment = result["matched_fragment"]
2743 most_recent_fragment = result["matched_fragment"]
2685 if (
2744 if (
2686 abort_if_offset_changes
2745 abort_if_offset_changes
2687 and result["matched_fragment"] != most_recent_fragment
2746 and result["matched_fragment"] != most_recent_fragment
2688 ):
2747 ):
2689 break
2748 break
2690 if result.get("ordered", False):
2749 if result.get("ordered", False):
2691 ordered.extend(result["completions"])
2750 ordered.extend(result["completions"])
2692 else:
2751 else:
2693 sortable.extend(result["completions"])
2752 sortable.extend(result["completions"])
2694
2753
2695 if not most_recent_fragment:
2754 if not most_recent_fragment:
2696 most_recent_fragment = "" # to satisfy typechecker (and just in case)
2755 most_recent_fragment = "" # to satisfy typechecker (and just in case)
2697
2756
2698 return most_recent_fragment, [
2757 return most_recent_fragment, [
2699 m.text for m in self._deduplicate(ordered + self._sort(sortable))
2758 m.text for m in self._deduplicate(ordered + self._sort(sortable))
2700 ]
2759 ]
2701
2760
2702 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
2761 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
2703 full_text=None) -> _CompleteResult:
2762 full_text=None) -> _CompleteResult:
2704 """
2763 """
2705 Like complete but can also returns raw jedi completions as well as the
2764 Like complete but can also returns raw jedi completions as well as the
2706 origin of the completion text. This could (and should) be made much
2765 origin of the completion text. This could (and should) be made much
2707 cleaner but that will be simpler once we drop the old (and stateful)
2766 cleaner but that will be simpler once we drop the old (and stateful)
2708 :any:`complete` API.
2767 :any:`complete` API.
2709
2768
2710 With current provisional API, cursor_pos act both (depending on the
2769 With current provisional API, cursor_pos act both (depending on the
2711 caller) as the offset in the ``text`` or ``line_buffer``, or as the
2770 caller) as the offset in the ``text`` or ``line_buffer``, or as the
2712 ``column`` when passing multiline strings this could/should be renamed
2771 ``column`` when passing multiline strings this could/should be renamed
2713 but would add extra noise.
2772 but would add extra noise.
2714
2773
2715 Parameters
2774 Parameters
2716 ----------
2775 ----------
2717 cursor_line
2776 cursor_line
2718 Index of the line the cursor is on. 0 indexed.
2777 Index of the line the cursor is on. 0 indexed.
2719 cursor_pos
2778 cursor_pos
2720 Position of the cursor in the current line/line_buffer/text. 0
2779 Position of the cursor in the current line/line_buffer/text. 0
2721 indexed.
2780 indexed.
2722 line_buffer : optional, str
2781 line_buffer : optional, str
2723 The current line the cursor is in, this is mostly due to legacy
2782 The current line the cursor is in, this is mostly due to legacy
2724 reason that readline could only give a us the single current line.
2783 reason that readline could only give a us the single current line.
2725 Prefer `full_text`.
2784 Prefer `full_text`.
2726 text : str
2785 text : str
2727 The current "token" the cursor is in, mostly also for historical
2786 The current "token" the cursor is in, mostly also for historical
2728 reasons. as the completer would trigger only after the current line
2787 reasons. as the completer would trigger only after the current line
2729 was parsed.
2788 was parsed.
2730 full_text : str
2789 full_text : str
2731 Full text of the current cell.
2790 Full text of the current cell.
2732
2791
2733 Returns
2792 Returns
2734 -------
2793 -------
2735 An ordered dictionary where keys are identifiers of completion
2794 An ordered dictionary where keys are identifiers of completion
2736 matchers and values are ``MatcherResult``s.
2795 matchers and values are ``MatcherResult``s.
2737 """
2796 """
2738
2797
2739 # if the cursor position isn't given, the only sane assumption we can
2798 # if the cursor position isn't given, the only sane assumption we can
2740 # make is that it's at the end of the line (the common case)
2799 # make is that it's at the end of the line (the common case)
2741 if cursor_pos is None:
2800 if cursor_pos is None:
2742 cursor_pos = len(line_buffer) if text is None else len(text)
2801 cursor_pos = len(line_buffer) if text is None else len(text)
2743
2802
2744 if self.use_main_ns:
2803 if self.use_main_ns:
2745 self.namespace = __main__.__dict__
2804 self.namespace = __main__.__dict__
2746
2805
2747 # if text is either None or an empty string, rely on the line buffer
2806 # if text is either None or an empty string, rely on the line buffer
2748 if (not line_buffer) and full_text:
2807 if (not line_buffer) and full_text:
2749 line_buffer = full_text.split('\n')[cursor_line]
2808 line_buffer = full_text.split('\n')[cursor_line]
2750 if not text: # issue #11508: check line_buffer before calling split_line
2809 if not text: # issue #11508: check line_buffer before calling split_line
2751 text = (
2810 text = (
2752 self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ""
2811 self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ""
2753 )
2812 )
2754
2813
2755 # If no line buffer is given, assume the input text is all there was
2814 # If no line buffer is given, assume the input text is all there was
2756 if line_buffer is None:
2815 if line_buffer is None:
2757 line_buffer = text
2816 line_buffer = text
2758
2817
2759 # deprecated - do not use `line_buffer` in new code.
2818 # deprecated - do not use `line_buffer` in new code.
2760 self.line_buffer = line_buffer
2819 self.line_buffer = line_buffer
2761 self.text_until_cursor = self.line_buffer[:cursor_pos]
2820 self.text_until_cursor = self.line_buffer[:cursor_pos]
2762
2821
2763 if not full_text:
2822 if not full_text:
2764 full_text = line_buffer
2823 full_text = line_buffer
2765
2824
2766 context = CompletionContext(
2825 context = CompletionContext(
2767 full_text=full_text,
2826 full_text=full_text,
2768 cursor_position=cursor_pos,
2827 cursor_position=cursor_pos,
2769 cursor_line=cursor_line,
2828 cursor_line=cursor_line,
2770 token=text,
2829 token=text,
2771 limit=MATCHES_LIMIT,
2830 limit=MATCHES_LIMIT,
2772 )
2831 )
2773
2832
2774 # Start with a clean slate of completions
2833 # Start with a clean slate of completions
2775 results = {}
2834 results = {}
2776
2835
2777 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2836 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2778
2837
2779 suppressed_matchers = set()
2838 suppressed_matchers = set()
2780
2839
2781 matchers = {
2840 matchers = {
2782 _get_matcher_id(matcher): matcher
2841 _get_matcher_id(matcher): matcher
2783 for matcher in sorted(
2842 for matcher in sorted(
2784 self.matchers, key=_get_matcher_priority, reverse=True
2843 self.matchers, key=_get_matcher_priority, reverse=True
2785 )
2844 )
2786 }
2845 }
2787
2846
2788 for matcher_id, matcher in matchers.items():
2847 for matcher_id, matcher in matchers.items():
2789 api_version = _get_matcher_api_version(matcher)
2848 api_version = _get_matcher_api_version(matcher)
2790 matcher_id = _get_matcher_id(matcher)
2849 matcher_id = _get_matcher_id(matcher)
2791
2850
2792 if matcher_id in self.disable_matchers:
2851 if matcher_id in self.disable_matchers:
2793 continue
2852 continue
2794
2853
2795 if matcher_id in results:
2854 if matcher_id in results:
2796 warnings.warn(f"Duplicate matcher ID: {matcher_id}.")
2855 warnings.warn(f"Duplicate matcher ID: {matcher_id}.")
2797
2856
2798 if matcher_id in suppressed_matchers:
2857 if matcher_id in suppressed_matchers:
2799 continue
2858 continue
2800
2859
2801 try:
2860 try:
2802 if api_version == 1:
2861 if api_version == 1:
2803 result = _convert_matcher_v1_result_to_v2(
2862 result = _convert_matcher_v1_result_to_v2(
2804 matcher(text), type=_UNKNOWN_TYPE
2863 matcher(text), type=_UNKNOWN_TYPE
2805 )
2864 )
2806 elif api_version == 2:
2865 elif api_version == 2:
2807 result = cast(matcher, MatcherAPIv2)(context)
2866 result = cast(matcher, MatcherAPIv2)(context)
2808 else:
2867 else:
2809 raise ValueError(f"Unsupported API version {api_version}")
2868 raise ValueError(f"Unsupported API version {api_version}")
2810 except:
2869 except:
2811 # Show the ugly traceback if the matcher causes an
2870 # Show the ugly traceback if the matcher causes an
2812 # exception, but do NOT crash the kernel!
2871 # exception, but do NOT crash the kernel!
2813 sys.excepthook(*sys.exc_info())
2872 sys.excepthook(*sys.exc_info())
2814 continue
2873 continue
2815
2874
2816 # set default value for matched fragment if suffix was not selected.
2875 # set default value for matched fragment if suffix was not selected.
2817 result["matched_fragment"] = result.get("matched_fragment", context.token)
2876 result["matched_fragment"] = result.get("matched_fragment", context.token)
2818
2877
2819 if not suppressed_matchers:
2878 if not suppressed_matchers:
2820 suppression_recommended = result.get("suppress", False)
2879 suppression_recommended = result.get("suppress", False)
2821
2880
2822 suppression_config = (
2881 suppression_config = (
2823 self.suppress_competing_matchers.get(matcher_id, None)
2882 self.suppress_competing_matchers.get(matcher_id, None)
2824 if isinstance(self.suppress_competing_matchers, dict)
2883 if isinstance(self.suppress_competing_matchers, dict)
2825 else self.suppress_competing_matchers
2884 else self.suppress_competing_matchers
2826 )
2885 )
2827 should_suppress = (
2886 should_suppress = (
2828 (suppression_config is True)
2887 (suppression_config is True)
2829 or (suppression_recommended and (suppression_config is not False))
2888 or (suppression_recommended and (suppression_config is not False))
2830 ) and has_any_completions(result)
2889 ) and has_any_completions(result)
2831
2890
2832 if should_suppress:
2891 if should_suppress:
2833 suppression_exceptions = result.get("do_not_suppress", set())
2892 suppression_exceptions = result.get("do_not_suppress", set())
2834 try:
2893 try:
2835 to_suppress = set(suppression_recommended)
2894 to_suppress = set(suppression_recommended)
2836 except TypeError:
2895 except TypeError:
2837 to_suppress = set(matchers)
2896 to_suppress = set(matchers)
2838 suppressed_matchers = to_suppress - suppression_exceptions
2897 suppressed_matchers = to_suppress - suppression_exceptions
2839
2898
2840 new_results = {}
2899 new_results = {}
2841 for previous_matcher_id, previous_result in results.items():
2900 for previous_matcher_id, previous_result in results.items():
2842 if previous_matcher_id not in suppressed_matchers:
2901 if previous_matcher_id not in suppressed_matchers:
2843 new_results[previous_matcher_id] = previous_result
2902 new_results[previous_matcher_id] = previous_result
2844 results = new_results
2903 results = new_results
2845
2904
2846 results[matcher_id] = result
2905 results[matcher_id] = result
2847
2906
2848 _, matches = self._arrange_and_extract(
2907 _, matches = self._arrange_and_extract(
2849 results,
2908 results,
2850 # TODO Jedi completions non included in legacy stateful API; was this deliberate or omission?
2909 # TODO Jedi completions non included in legacy stateful API; was this deliberate or omission?
2851 # if it was omission, we can remove the filtering step, otherwise remove this comment.
2910 # if it was omission, we can remove the filtering step, otherwise remove this comment.
2852 skip_matchers={jedi_matcher_id},
2911 skip_matchers={jedi_matcher_id},
2853 abort_if_offset_changes=False,
2912 abort_if_offset_changes=False,
2854 )
2913 )
2855
2914
2856 # populate legacy stateful API
2915 # populate legacy stateful API
2857 self.matches = matches
2916 self.matches = matches
2858
2917
2859 return results
2918 return results
2860
2919
2861 @staticmethod
2920 @staticmethod
2862 def _deduplicate(
2921 def _deduplicate(
2863 matches: Sequence[SimpleCompletion],
2922 matches: Sequence[SimpleCompletion],
2864 ) -> Iterable[SimpleCompletion]:
2923 ) -> Iterable[SimpleCompletion]:
2865 filtered_matches = {}
2924 filtered_matches = {}
2866 for match in matches:
2925 for match in matches:
2867 text = match.text
2926 text = match.text
2868 if (
2927 if (
2869 text not in filtered_matches
2928 text not in filtered_matches
2870 or filtered_matches[text].type == _UNKNOWN_TYPE
2929 or filtered_matches[text].type == _UNKNOWN_TYPE
2871 ):
2930 ):
2872 filtered_matches[text] = match
2931 filtered_matches[text] = match
2873
2932
2874 return filtered_matches.values()
2933 return filtered_matches.values()
2875
2934
2876 @staticmethod
2935 @staticmethod
2877 def _sort(matches: Sequence[SimpleCompletion]):
2936 def _sort(matches: Sequence[SimpleCompletion]):
2878 return sorted(matches, key=lambda x: completions_sorting_key(x.text))
2937 return sorted(matches, key=lambda x: completions_sorting_key(x.text))
2879
2938
2880 @context_matcher()
2939 @context_matcher()
2881 def fwd_unicode_matcher(self, context: CompletionContext):
2940 def fwd_unicode_matcher(self, context: CompletionContext):
2882 """Same as :any:`fwd_unicode_match`, but adopted to new Matcher API."""
2941 """Same as :any:`fwd_unicode_match`, but adopted to new Matcher API."""
2883 # TODO: use `context.limit` to terminate early once we matched the maximum
2942 # TODO: use `context.limit` to terminate early once we matched the maximum
2884 # number that will be used downstream; can be added as an optional to
2943 # number that will be used downstream; can be added as an optional to
2885 # `fwd_unicode_match(text: str, limit: int = None)` or we could re-implement here.
2944 # `fwd_unicode_match(text: str, limit: int = None)` or we could re-implement here.
2886 fragment, matches = self.fwd_unicode_match(context.text_until_cursor)
2945 fragment, matches = self.fwd_unicode_match(context.text_until_cursor)
2887 return _convert_matcher_v1_result_to_v2(
2946 return _convert_matcher_v1_result_to_v2(
2888 matches, type="unicode", fragment=fragment, suppress_if_matches=True
2947 matches, type="unicode", fragment=fragment, suppress_if_matches=True
2889 )
2948 )
2890
2949
2891 def fwd_unicode_match(self, text: str) -> Tuple[str, Sequence[str]]:
2950 def fwd_unicode_match(self, text: str) -> Tuple[str, Sequence[str]]:
2892 """
2951 """
2893 Forward match a string starting with a backslash with a list of
2952 Forward match a string starting with a backslash with a list of
2894 potential Unicode completions.
2953 potential Unicode completions.
2895
2954
2896 Will compute list of Unicode character names on first call and cache it.
2955 Will compute list of Unicode character names on first call and cache it.
2897
2956
2898 .. deprecated:: 8.6
2957 .. deprecated:: 8.6
2899 You can use :meth:`fwd_unicode_matcher` instead.
2958 You can use :meth:`fwd_unicode_matcher` instead.
2900
2959
2901 Returns
2960 Returns
2902 -------
2961 -------
2903 At tuple with:
2962 At tuple with:
2904 - matched text (empty if no matches)
2963 - matched text (empty if no matches)
2905 - list of potential completions, empty tuple otherwise)
2964 - list of potential completions, empty tuple otherwise)
2906 """
2965 """
2907 # TODO: self.unicode_names is here a list we traverse each time with ~100k elements.
2966 # TODO: self.unicode_names is here a list we traverse each time with ~100k elements.
2908 # We could do a faster match using a Trie.
2967 # We could do a faster match using a Trie.
2909
2968
2910 # Using pygtrie the following seem to work:
2969 # Using pygtrie the following seem to work:
2911
2970
2912 # s = PrefixSet()
2971 # s = PrefixSet()
2913
2972
2914 # for c in range(0,0x10FFFF + 1):
2973 # for c in range(0,0x10FFFF + 1):
2915 # try:
2974 # try:
2916 # s.add(unicodedata.name(chr(c)))
2975 # s.add(unicodedata.name(chr(c)))
2917 # except ValueError:
2976 # except ValueError:
2918 # pass
2977 # pass
2919 # [''.join(k) for k in s.iter(prefix)]
2978 # [''.join(k) for k in s.iter(prefix)]
2920
2979
2921 # But need to be timed and adds an extra dependency.
2980 # But need to be timed and adds an extra dependency.
2922
2981
2923 slashpos = text.rfind('\\')
2982 slashpos = text.rfind('\\')
2924 # if text starts with slash
2983 # if text starts with slash
2925 if slashpos > -1:
2984 if slashpos > -1:
2926 # PERF: It's important that we don't access self._unicode_names
2985 # PERF: It's important that we don't access self._unicode_names
2927 # until we're inside this if-block. _unicode_names is lazily
2986 # until we're inside this if-block. _unicode_names is lazily
2928 # initialized, and it takes a user-noticeable amount of time to
2987 # initialized, and it takes a user-noticeable amount of time to
2929 # initialize it, so we don't want to initialize it unless we're
2988 # initialize it, so we don't want to initialize it unless we're
2930 # actually going to use it.
2989 # actually going to use it.
2931 s = text[slashpos + 1 :]
2990 s = text[slashpos + 1 :]
2932 sup = s.upper()
2991 sup = s.upper()
2933 candidates = [x for x in self.unicode_names if x.startswith(sup)]
2992 candidates = [x for x in self.unicode_names if x.startswith(sup)]
2934 if candidates:
2993 if candidates:
2935 return s, candidates
2994 return s, candidates
2936 candidates = [x for x in self.unicode_names if sup in x]
2995 candidates = [x for x in self.unicode_names if sup in x]
2937 if candidates:
2996 if candidates:
2938 return s, candidates
2997 return s, candidates
2939 splitsup = sup.split(" ")
2998 splitsup = sup.split(" ")
2940 candidates = [
2999 candidates = [
2941 x for x in self.unicode_names if all(u in x for u in splitsup)
3000 x for x in self.unicode_names if all(u in x for u in splitsup)
2942 ]
3001 ]
2943 if candidates:
3002 if candidates:
2944 return s, candidates
3003 return s, candidates
2945
3004
2946 return "", ()
3005 return "", ()
2947
3006
2948 # if text does not start with slash
3007 # if text does not start with slash
2949 else:
3008 else:
2950 return '', ()
3009 return '', ()
2951
3010
2952 @property
3011 @property
2953 def unicode_names(self) -> List[str]:
3012 def unicode_names(self) -> List[str]:
2954 """List of names of unicode code points that can be completed.
3013 """List of names of unicode code points that can be completed.
2955
3014
2956 The list is lazily initialized on first access.
3015 The list is lazily initialized on first access.
2957 """
3016 """
2958 if self._unicode_names is None:
3017 if self._unicode_names is None:
2959 names = []
3018 names = []
2960 for c in range(0,0x10FFFF + 1):
3019 for c in range(0,0x10FFFF + 1):
2961 try:
3020 try:
2962 names.append(unicodedata.name(chr(c)))
3021 names.append(unicodedata.name(chr(c)))
2963 except ValueError:
3022 except ValueError:
2964 pass
3023 pass
2965 self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
3024 self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
2966
3025
2967 return self._unicode_names
3026 return self._unicode_names
2968
3027
2969 def _unicode_name_compute(ranges:List[Tuple[int,int]]) -> List[str]:
3028 def _unicode_name_compute(ranges:List[Tuple[int,int]]) -> List[str]:
2970 names = []
3029 names = []
2971 for start,stop in ranges:
3030 for start,stop in ranges:
2972 for c in range(start, stop) :
3031 for c in range(start, stop) :
2973 try:
3032 try:
2974 names.append(unicodedata.name(chr(c)))
3033 names.append(unicodedata.name(chr(c)))
2975 except ValueError:
3034 except ValueError:
2976 pass
3035 pass
2977 return names
3036 return names
@@ -1,1505 +1,1547 b''
1 # encoding: utf-8
1 # encoding: utf-8
2 """Tests for the IPython tab-completion machinery."""
2 """Tests for the IPython tab-completion machinery."""
3
3
4 # Copyright (c) IPython Development Team.
4 # Copyright (c) IPython Development Team.
5 # Distributed under the terms of the Modified BSD License.
5 # Distributed under the terms of the Modified BSD License.
6
6
7 import os
7 import os
8 import pytest
8 import pytest
9 import sys
9 import sys
10 import textwrap
10 import textwrap
11 import unittest
11 import unittest
12
12
13 from contextlib import contextmanager
13 from contextlib import contextmanager
14
14
15 from traitlets.config.loader import Config
15 from traitlets.config.loader import Config
16 from IPython import get_ipython
16 from IPython import get_ipython
17 from IPython.core import completer
17 from IPython.core import completer
18 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
18 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
19 from IPython.utils.generics import complete_object
19 from IPython.utils.generics import complete_object
20 from IPython.testing import decorators as dec
20 from IPython.testing import decorators as dec
21
21
22 from IPython.core.completer import (
22 from IPython.core.completer import (
23 Completion,
23 Completion,
24 provisionalcompleter,
24 provisionalcompleter,
25 match_dict_keys,
25 match_dict_keys,
26 _deduplicate_completions,
26 _deduplicate_completions,
27 completion_matcher,
27 completion_matcher,
28 SimpleCompletion,
28 SimpleCompletion,
29 CompletionContext,
29 CompletionContext,
30 )
30 )
31
31
32 # -----------------------------------------------------------------------------
32 # -----------------------------------------------------------------------------
33 # Test functions
33 # Test functions
34 # -----------------------------------------------------------------------------
34 # -----------------------------------------------------------------------------
35
35
36 def recompute_unicode_ranges():
36 def recompute_unicode_ranges():
37 """
37 """
38 utility to recompute the largest unicode range without any characters
38 utility to recompute the largest unicode range without any characters
39
39
40 use to recompute the gap in the global _UNICODE_RANGES of completer.py
40 use to recompute the gap in the global _UNICODE_RANGES of completer.py
41 """
41 """
42 import itertools
42 import itertools
43 import unicodedata
43 import unicodedata
44 valid = []
44 valid = []
45 for c in range(0,0x10FFFF + 1):
45 for c in range(0,0x10FFFF + 1):
46 try:
46 try:
47 unicodedata.name(chr(c))
47 unicodedata.name(chr(c))
48 except ValueError:
48 except ValueError:
49 continue
49 continue
50 valid.append(c)
50 valid.append(c)
51
51
52 def ranges(i):
52 def ranges(i):
53 for a, b in itertools.groupby(enumerate(i), lambda pair: pair[1] - pair[0]):
53 for a, b in itertools.groupby(enumerate(i), lambda pair: pair[1] - pair[0]):
54 b = list(b)
54 b = list(b)
55 yield b[0][1], b[-1][1]
55 yield b[0][1], b[-1][1]
56
56
57 rg = list(ranges(valid))
57 rg = list(ranges(valid))
58 lens = []
58 lens = []
59 gap_lens = []
59 gap_lens = []
60 pstart, pstop = 0,0
60 pstart, pstop = 0,0
61 for start, stop in rg:
61 for start, stop in rg:
62 lens.append(stop-start)
62 lens.append(stop-start)
63 gap_lens.append((start - pstop, hex(pstop), hex(start), f'{round((start - pstop)/0xe01f0*100)}%'))
63 gap_lens.append((start - pstop, hex(pstop), hex(start), f'{round((start - pstop)/0xe01f0*100)}%'))
64 pstart, pstop = start, stop
64 pstart, pstop = start, stop
65
65
66 return sorted(gap_lens)[-1]
66 return sorted(gap_lens)[-1]
67
67
68
68
69
69
70 def test_unicode_range():
70 def test_unicode_range():
71 """
71 """
72 Test that the ranges we test for unicode names give the same number of
72 Test that the ranges we test for unicode names give the same number of
73 results than testing the full length.
73 results than testing the full length.
74 """
74 """
75 from IPython.core.completer import _unicode_name_compute, _UNICODE_RANGES
75 from IPython.core.completer import _unicode_name_compute, _UNICODE_RANGES
76
76
77 expected_list = _unicode_name_compute([(0, 0x110000)])
77 expected_list = _unicode_name_compute([(0, 0x110000)])
78 test = _unicode_name_compute(_UNICODE_RANGES)
78 test = _unicode_name_compute(_UNICODE_RANGES)
79 len_exp = len(expected_list)
79 len_exp = len(expected_list)
80 len_test = len(test)
80 len_test = len(test)
81
81
82 # do not inline the len() or on error pytest will try to print the 130 000 +
82 # do not inline the len() or on error pytest will try to print the 130 000 +
83 # elements.
83 # elements.
84 message = None
84 message = None
85 if len_exp != len_test or len_exp > 131808:
85 if len_exp != len_test or len_exp > 131808:
86 size, start, stop, prct = recompute_unicode_ranges()
86 size, start, stop, prct = recompute_unicode_ranges()
87 message = f"""_UNICODE_RANGES likely wrong and need updating. This is
87 message = f"""_UNICODE_RANGES likely wrong and need updating. This is
88 likely due to a new release of Python. We've find that the biggest gap
88 likely due to a new release of Python. We've find that the biggest gap
89 in unicode characters has reduces in size to be {size} characters
89 in unicode characters has reduces in size to be {size} characters
90 ({prct}), from {start}, to {stop}. In completer.py likely update to
90 ({prct}), from {start}, to {stop}. In completer.py likely update to
91
91
92 _UNICODE_RANGES = [(32, {start}), ({stop}, 0xe01f0)]
92 _UNICODE_RANGES = [(32, {start}), ({stop}, 0xe01f0)]
93
93
94 And update the assertion below to use
94 And update the assertion below to use
95
95
96 len_exp <= {len_exp}
96 len_exp <= {len_exp}
97 """
97 """
98 assert len_exp == len_test, message
98 assert len_exp == len_test, message
99
99
100 # fail if new unicode symbols have been added.
100 # fail if new unicode symbols have been added.
101 assert len_exp <= 138552, message
101 assert len_exp <= 138552, message
102
102
103
103
104 @contextmanager
104 @contextmanager
105 def greedy_completion():
105 def greedy_completion():
106 ip = get_ipython()
106 ip = get_ipython()
107 greedy_original = ip.Completer.greedy
107 greedy_original = ip.Completer.greedy
108 try:
108 try:
109 ip.Completer.greedy = True
109 ip.Completer.greedy = True
110 yield
110 yield
111 finally:
111 finally:
112 ip.Completer.greedy = greedy_original
112 ip.Completer.greedy = greedy_original
113
113
114
114
115 @contextmanager
115 @contextmanager
116 def evaluation_level(evaluation: str):
117 ip = get_ipython()
118 evaluation_original = ip.Completer.evaluation
119 try:
120 ip.Completer.evaluation = evaluation
121 yield
122 finally:
123 ip.Completer.evaluation = evaluation_original
124
125
126 @contextmanager
116 def custom_matchers(matchers):
127 def custom_matchers(matchers):
117 ip = get_ipython()
128 ip = get_ipython()
118 try:
129 try:
119 ip.Completer.custom_matchers.extend(matchers)
130 ip.Completer.custom_matchers.extend(matchers)
120 yield
131 yield
121 finally:
132 finally:
122 ip.Completer.custom_matchers.clear()
133 ip.Completer.custom_matchers.clear()
123
134
124
135
125 def test_protect_filename():
136 def test_protect_filename():
126 if sys.platform == "win32":
137 if sys.platform == "win32":
127 pairs = [
138 pairs = [
128 ("abc", "abc"),
139 ("abc", "abc"),
129 (" abc", '" abc"'),
140 (" abc", '" abc"'),
130 ("a bc", '"a bc"'),
141 ("a bc", '"a bc"'),
131 ("a bc", '"a bc"'),
142 ("a bc", '"a bc"'),
132 (" bc", '" bc"'),
143 (" bc", '" bc"'),
133 ]
144 ]
134 else:
145 else:
135 pairs = [
146 pairs = [
136 ("abc", "abc"),
147 ("abc", "abc"),
137 (" abc", r"\ abc"),
148 (" abc", r"\ abc"),
138 ("a bc", r"a\ bc"),
149 ("a bc", r"a\ bc"),
139 ("a bc", r"a\ \ bc"),
150 ("a bc", r"a\ \ bc"),
140 (" bc", r"\ \ bc"),
151 (" bc", r"\ \ bc"),
141 # On posix, we also protect parens and other special characters.
152 # On posix, we also protect parens and other special characters.
142 ("a(bc", r"a\(bc"),
153 ("a(bc", r"a\(bc"),
143 ("a)bc", r"a\)bc"),
154 ("a)bc", r"a\)bc"),
144 ("a( )bc", r"a\(\ \)bc"),
155 ("a( )bc", r"a\(\ \)bc"),
145 ("a[1]bc", r"a\[1\]bc"),
156 ("a[1]bc", r"a\[1\]bc"),
146 ("a{1}bc", r"a\{1\}bc"),
157 ("a{1}bc", r"a\{1\}bc"),
147 ("a#bc", r"a\#bc"),
158 ("a#bc", r"a\#bc"),
148 ("a?bc", r"a\?bc"),
159 ("a?bc", r"a\?bc"),
149 ("a=bc", r"a\=bc"),
160 ("a=bc", r"a\=bc"),
150 ("a\\bc", r"a\\bc"),
161 ("a\\bc", r"a\\bc"),
151 ("a|bc", r"a\|bc"),
162 ("a|bc", r"a\|bc"),
152 ("a;bc", r"a\;bc"),
163 ("a;bc", r"a\;bc"),
153 ("a:bc", r"a\:bc"),
164 ("a:bc", r"a\:bc"),
154 ("a'bc", r"a\'bc"),
165 ("a'bc", r"a\'bc"),
155 ("a*bc", r"a\*bc"),
166 ("a*bc", r"a\*bc"),
156 ('a"bc', r"a\"bc"),
167 ('a"bc', r"a\"bc"),
157 ("a^bc", r"a\^bc"),
168 ("a^bc", r"a\^bc"),
158 ("a&bc", r"a\&bc"),
169 ("a&bc", r"a\&bc"),
159 ]
170 ]
160 # run the actual tests
171 # run the actual tests
161 for s1, s2 in pairs:
172 for s1, s2 in pairs:
162 s1p = completer.protect_filename(s1)
173 s1p = completer.protect_filename(s1)
163 assert s1p == s2
174 assert s1p == s2
164
175
165
176
166 def check_line_split(splitter, test_specs):
177 def check_line_split(splitter, test_specs):
167 for part1, part2, split in test_specs:
178 for part1, part2, split in test_specs:
168 cursor_pos = len(part1)
179 cursor_pos = len(part1)
169 line = part1 + part2
180 line = part1 + part2
170 out = splitter.split_line(line, cursor_pos)
181 out = splitter.split_line(line, cursor_pos)
171 assert out == split
182 assert out == split
172
183
173
184
174 def test_line_split():
185 def test_line_split():
175 """Basic line splitter test with default specs."""
186 """Basic line splitter test with default specs."""
176 sp = completer.CompletionSplitter()
187 sp = completer.CompletionSplitter()
177 # The format of the test specs is: part1, part2, expected answer. Parts 1
188 # The format of the test specs is: part1, part2, expected answer. Parts 1
178 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
189 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
179 # was at the end of part1. So an empty part2 represents someone hitting
190 # was at the end of part1. So an empty part2 represents someone hitting
180 # tab at the end of the line, the most common case.
191 # tab at the end of the line, the most common case.
181 t = [
192 t = [
182 ("run some/scrip", "", "some/scrip"),
193 ("run some/scrip", "", "some/scrip"),
183 ("run scripts/er", "ror.py foo", "scripts/er"),
194 ("run scripts/er", "ror.py foo", "scripts/er"),
184 ("echo $HOM", "", "HOM"),
195 ("echo $HOM", "", "HOM"),
185 ("print sys.pa", "", "sys.pa"),
196 ("print sys.pa", "", "sys.pa"),
186 ("print(sys.pa", "", "sys.pa"),
197 ("print(sys.pa", "", "sys.pa"),
187 ("execfile('scripts/er", "", "scripts/er"),
198 ("execfile('scripts/er", "", "scripts/er"),
188 ("a[x.", "", "x."),
199 ("a[x.", "", "x."),
189 ("a[x.", "y", "x."),
200 ("a[x.", "y", "x."),
190 ('cd "some_file/', "", "some_file/"),
201 ('cd "some_file/', "", "some_file/"),
191 ]
202 ]
192 check_line_split(sp, t)
203 check_line_split(sp, t)
193 # Ensure splitting works OK with unicode by re-running the tests with
204 # Ensure splitting works OK with unicode by re-running the tests with
194 # all inputs turned into unicode
205 # all inputs turned into unicode
195 check_line_split(sp, [map(str, p) for p in t])
206 check_line_split(sp, [map(str, p) for p in t])
196
207
197
208
198 class NamedInstanceClass:
209 class NamedInstanceClass:
199 instances = {}
210 instances = {}
200
211
201 def __init__(self, name):
212 def __init__(self, name):
202 self.instances[name] = self
213 self.instances[name] = self
203
214
204 @classmethod
215 @classmethod
205 def _ipython_key_completions_(cls):
216 def _ipython_key_completions_(cls):
206 return cls.instances.keys()
217 return cls.instances.keys()
207
218
208
219
209 class KeyCompletable:
220 class KeyCompletable:
210 def __init__(self, things=()):
221 def __init__(self, things=()):
211 self.things = things
222 self.things = things
212
223
213 def _ipython_key_completions_(self):
224 def _ipython_key_completions_(self):
214 return list(self.things)
225 return list(self.things)
215
226
216
227
217 class TestCompleter(unittest.TestCase):
228 class TestCompleter(unittest.TestCase):
218 def setUp(self):
229 def setUp(self):
219 """
230 """
220 We want to silence all PendingDeprecationWarning when testing the completer
231 We want to silence all PendingDeprecationWarning when testing the completer
221 """
232 """
222 self._assertwarns = self.assertWarns(PendingDeprecationWarning)
233 self._assertwarns = self.assertWarns(PendingDeprecationWarning)
223 self._assertwarns.__enter__()
234 self._assertwarns.__enter__()
224
235
225 def tearDown(self):
236 def tearDown(self):
226 try:
237 try:
227 self._assertwarns.__exit__(None, None, None)
238 self._assertwarns.__exit__(None, None, None)
228 except AssertionError:
239 except AssertionError:
229 pass
240 pass
230
241
231 def test_custom_completion_error(self):
242 def test_custom_completion_error(self):
232 """Test that errors from custom attribute completers are silenced."""
243 """Test that errors from custom attribute completers are silenced."""
233 ip = get_ipython()
244 ip = get_ipython()
234
245
235 class A:
246 class A:
236 pass
247 pass
237
248
238 ip.user_ns["x"] = A()
249 ip.user_ns["x"] = A()
239
250
240 @complete_object.register(A)
251 @complete_object.register(A)
241 def complete_A(a, existing_completions):
252 def complete_A(a, existing_completions):
242 raise TypeError("this should be silenced")
253 raise TypeError("this should be silenced")
243
254
244 ip.complete("x.")
255 ip.complete("x.")
245
256
246 def test_custom_completion_ordering(self):
257 def test_custom_completion_ordering(self):
247 """Test that errors from custom attribute completers are silenced."""
258 """Test that errors from custom attribute completers are silenced."""
248 ip = get_ipython()
259 ip = get_ipython()
249
260
250 _, matches = ip.complete('in')
261 _, matches = ip.complete('in')
251 assert matches.index('input') < matches.index('int')
262 assert matches.index('input') < matches.index('int')
252
263
253 def complete_example(a):
264 def complete_example(a):
254 return ['example2', 'example1']
265 return ['example2', 'example1']
255
266
256 ip.Completer.custom_completers.add_re('ex*', complete_example)
267 ip.Completer.custom_completers.add_re('ex*', complete_example)
257 _, matches = ip.complete('ex')
268 _, matches = ip.complete('ex')
258 assert matches.index('example2') < matches.index('example1')
269 assert matches.index('example2') < matches.index('example1')
259
270
260 def test_unicode_completions(self):
271 def test_unicode_completions(self):
261 ip = get_ipython()
272 ip = get_ipython()
262 # Some strings that trigger different types of completion. Check them both
273 # Some strings that trigger different types of completion. Check them both
263 # in str and unicode forms
274 # in str and unicode forms
264 s = ["ru", "%ru", "cd /", "floa", "float(x)/"]
275 s = ["ru", "%ru", "cd /", "floa", "float(x)/"]
265 for t in s + list(map(str, s)):
276 for t in s + list(map(str, s)):
266 # We don't need to check exact completion values (they may change
277 # We don't need to check exact completion values (they may change
267 # depending on the state of the namespace, but at least no exceptions
278 # depending on the state of the namespace, but at least no exceptions
268 # should be thrown and the return value should be a pair of text, list
279 # should be thrown and the return value should be a pair of text, list
269 # values.
280 # values.
270 text, matches = ip.complete(t)
281 text, matches = ip.complete(t)
271 self.assertIsInstance(text, str)
282 self.assertIsInstance(text, str)
272 self.assertIsInstance(matches, list)
283 self.assertIsInstance(matches, list)
273
284
274 def test_latex_completions(self):
285 def test_latex_completions(self):
275 from IPython.core.latex_symbols import latex_symbols
286 from IPython.core.latex_symbols import latex_symbols
276 import random
287 import random
277
288
278 ip = get_ipython()
289 ip = get_ipython()
279 # Test some random unicode symbols
290 # Test some random unicode symbols
280 keys = random.sample(sorted(latex_symbols), 10)
291 keys = random.sample(sorted(latex_symbols), 10)
281 for k in keys:
292 for k in keys:
282 text, matches = ip.complete(k)
293 text, matches = ip.complete(k)
283 self.assertEqual(text, k)
294 self.assertEqual(text, k)
284 self.assertEqual(matches, [latex_symbols[k]])
295 self.assertEqual(matches, [latex_symbols[k]])
285 # Test a more complex line
296 # Test a more complex line
286 text, matches = ip.complete("print(\\alpha")
297 text, matches = ip.complete("print(\\alpha")
287 self.assertEqual(text, "\\alpha")
298 self.assertEqual(text, "\\alpha")
288 self.assertEqual(matches[0], latex_symbols["\\alpha"])
299 self.assertEqual(matches[0], latex_symbols["\\alpha"])
289 # Test multiple matching latex symbols
300 # Test multiple matching latex symbols
290 text, matches = ip.complete("\\al")
301 text, matches = ip.complete("\\al")
291 self.assertIn("\\alpha", matches)
302 self.assertIn("\\alpha", matches)
292 self.assertIn("\\aleph", matches)
303 self.assertIn("\\aleph", matches)
293
304
294 def test_latex_no_results(self):
305 def test_latex_no_results(self):
295 """
306 """
296 forward latex should really return nothing in either field if nothing is found.
307 forward latex should really return nothing in either field if nothing is found.
297 """
308 """
298 ip = get_ipython()
309 ip = get_ipython()
299 text, matches = ip.Completer.latex_matches("\\really_i_should_match_nothing")
310 text, matches = ip.Completer.latex_matches("\\really_i_should_match_nothing")
300 self.assertEqual(text, "")
311 self.assertEqual(text, "")
301 self.assertEqual(matches, ())
312 self.assertEqual(matches, ())
302
313
303 def test_back_latex_completion(self):
314 def test_back_latex_completion(self):
304 ip = get_ipython()
315 ip = get_ipython()
305
316
306 # do not return more than 1 matches for \beta, only the latex one.
317 # do not return more than 1 matches for \beta, only the latex one.
307 name, matches = ip.complete("\\Ξ²")
318 name, matches = ip.complete("\\Ξ²")
308 self.assertEqual(matches, ["\\beta"])
319 self.assertEqual(matches, ["\\beta"])
309
320
310 def test_back_unicode_completion(self):
321 def test_back_unicode_completion(self):
311 ip = get_ipython()
322 ip = get_ipython()
312
323
313 name, matches = ip.complete("\\β…€")
324 name, matches = ip.complete("\\β…€")
314 self.assertEqual(matches, ["\\ROMAN NUMERAL FIVE"])
325 self.assertEqual(matches, ["\\ROMAN NUMERAL FIVE"])
315
326
316 def test_forward_unicode_completion(self):
327 def test_forward_unicode_completion(self):
317 ip = get_ipython()
328 ip = get_ipython()
318
329
319 name, matches = ip.complete("\\ROMAN NUMERAL FIVE")
330 name, matches = ip.complete("\\ROMAN NUMERAL FIVE")
320 self.assertEqual(matches, ["β…€"]) # This is not a V
331 self.assertEqual(matches, ["β…€"]) # This is not a V
321 self.assertEqual(matches, ["\u2164"]) # same as above but explicit.
332 self.assertEqual(matches, ["\u2164"]) # same as above but explicit.
322
333
323 def test_delim_setting(self):
334 def test_delim_setting(self):
324 sp = completer.CompletionSplitter()
335 sp = completer.CompletionSplitter()
325 sp.delims = " "
336 sp.delims = " "
326 self.assertEqual(sp.delims, " ")
337 self.assertEqual(sp.delims, " ")
327 self.assertEqual(sp._delim_expr, r"[\ ]")
338 self.assertEqual(sp._delim_expr, r"[\ ]")
328
339
329 def test_spaces(self):
340 def test_spaces(self):
330 """Test with only spaces as split chars."""
341 """Test with only spaces as split chars."""
331 sp = completer.CompletionSplitter()
342 sp = completer.CompletionSplitter()
332 sp.delims = " "
343 sp.delims = " "
333 t = [("foo", "", "foo"), ("run foo", "", "foo"), ("run foo", "bar", "foo")]
344 t = [("foo", "", "foo"), ("run foo", "", "foo"), ("run foo", "bar", "foo")]
334 check_line_split(sp, t)
345 check_line_split(sp, t)
335
346
336 def test_has_open_quotes1(self):
347 def test_has_open_quotes1(self):
337 for s in ["'", "'''", "'hi' '"]:
348 for s in ["'", "'''", "'hi' '"]:
338 self.assertEqual(completer.has_open_quotes(s), "'")
349 self.assertEqual(completer.has_open_quotes(s), "'")
339
350
340 def test_has_open_quotes2(self):
351 def test_has_open_quotes2(self):
341 for s in ['"', '"""', '"hi" "']:
352 for s in ['"', '"""', '"hi" "']:
342 self.assertEqual(completer.has_open_quotes(s), '"')
353 self.assertEqual(completer.has_open_quotes(s), '"')
343
354
344 def test_has_open_quotes3(self):
355 def test_has_open_quotes3(self):
345 for s in ["''", "''' '''", "'hi' 'ipython'"]:
356 for s in ["''", "''' '''", "'hi' 'ipython'"]:
346 self.assertFalse(completer.has_open_quotes(s))
357 self.assertFalse(completer.has_open_quotes(s))
347
358
348 def test_has_open_quotes4(self):
359 def test_has_open_quotes4(self):
349 for s in ['""', '""" """', '"hi" "ipython"']:
360 for s in ['""', '""" """', '"hi" "ipython"']:
350 self.assertFalse(completer.has_open_quotes(s))
361 self.assertFalse(completer.has_open_quotes(s))
351
362
352 @pytest.mark.xfail(
363 @pytest.mark.xfail(
353 sys.platform == "win32", reason="abspath completions fail on Windows"
364 sys.platform == "win32", reason="abspath completions fail on Windows"
354 )
365 )
355 def test_abspath_file_completions(self):
366 def test_abspath_file_completions(self):
356 ip = get_ipython()
367 ip = get_ipython()
357 with TemporaryDirectory() as tmpdir:
368 with TemporaryDirectory() as tmpdir:
358 prefix = os.path.join(tmpdir, "foo")
369 prefix = os.path.join(tmpdir, "foo")
359 suffixes = ["1", "2"]
370 suffixes = ["1", "2"]
360 names = [prefix + s for s in suffixes]
371 names = [prefix + s for s in suffixes]
361 for n in names:
372 for n in names:
362 open(n, "w", encoding="utf-8").close()
373 open(n, "w", encoding="utf-8").close()
363
374
364 # Check simple completion
375 # Check simple completion
365 c = ip.complete(prefix)[1]
376 c = ip.complete(prefix)[1]
366 self.assertEqual(c, names)
377 self.assertEqual(c, names)
367
378
368 # Now check with a function call
379 # Now check with a function call
369 cmd = 'a = f("%s' % prefix
380 cmd = 'a = f("%s' % prefix
370 c = ip.complete(prefix, cmd)[1]
381 c = ip.complete(prefix, cmd)[1]
371 comp = [prefix + s for s in suffixes]
382 comp = [prefix + s for s in suffixes]
372 self.assertEqual(c, comp)
383 self.assertEqual(c, comp)
373
384
374 def test_local_file_completions(self):
385 def test_local_file_completions(self):
375 ip = get_ipython()
386 ip = get_ipython()
376 with TemporaryWorkingDirectory():
387 with TemporaryWorkingDirectory():
377 prefix = "./foo"
388 prefix = "./foo"
378 suffixes = ["1", "2"]
389 suffixes = ["1", "2"]
379 names = [prefix + s for s in suffixes]
390 names = [prefix + s for s in suffixes]
380 for n in names:
391 for n in names:
381 open(n, "w", encoding="utf-8").close()
392 open(n, "w", encoding="utf-8").close()
382
393
383 # Check simple completion
394 # Check simple completion
384 c = ip.complete(prefix)[1]
395 c = ip.complete(prefix)[1]
385 self.assertEqual(c, names)
396 self.assertEqual(c, names)
386
397
387 # Now check with a function call
398 # Now check with a function call
388 cmd = 'a = f("%s' % prefix
399 cmd = 'a = f("%s' % prefix
389 c = ip.complete(prefix, cmd)[1]
400 c = ip.complete(prefix, cmd)[1]
390 comp = {prefix + s for s in suffixes}
401 comp = {prefix + s for s in suffixes}
391 self.assertTrue(comp.issubset(set(c)))
402 self.assertTrue(comp.issubset(set(c)))
392
403
393 def test_quoted_file_completions(self):
404 def test_quoted_file_completions(self):
394 ip = get_ipython()
405 ip = get_ipython()
395
406
396 def _(text):
407 def _(text):
397 return ip.Completer._complete(
408 return ip.Completer._complete(
398 cursor_line=0, cursor_pos=len(text), full_text=text
409 cursor_line=0, cursor_pos=len(text), full_text=text
399 )["IPCompleter.file_matcher"]["completions"]
410 )["IPCompleter.file_matcher"]["completions"]
400
411
401 with TemporaryWorkingDirectory():
412 with TemporaryWorkingDirectory():
402 name = "foo'bar"
413 name = "foo'bar"
403 open(name, "w", encoding="utf-8").close()
414 open(name, "w", encoding="utf-8").close()
404
415
405 # Don't escape Windows
416 # Don't escape Windows
406 escaped = name if sys.platform == "win32" else "foo\\'bar"
417 escaped = name if sys.platform == "win32" else "foo\\'bar"
407
418
408 # Single quote matches embedded single quote
419 # Single quote matches embedded single quote
409 c = _("open('foo")[0]
420 c = _("open('foo")[0]
410 self.assertEqual(c.text, escaped)
421 self.assertEqual(c.text, escaped)
411
422
412 # Double quote requires no escape
423 # Double quote requires no escape
413 c = _('open("foo')[0]
424 c = _('open("foo')[0]
414 self.assertEqual(c.text, name)
425 self.assertEqual(c.text, name)
415
426
416 # No quote requires an escape
427 # No quote requires an escape
417 c = _("%ls foo")[0]
428 c = _("%ls foo")[0]
418 self.assertEqual(c.text, escaped)
429 self.assertEqual(c.text, escaped)
419
430
420 def test_all_completions_dups(self):
431 def test_all_completions_dups(self):
421 """
432 """
422 Make sure the output of `IPCompleter.all_completions` does not have
433 Make sure the output of `IPCompleter.all_completions` does not have
423 duplicated prefixes.
434 duplicated prefixes.
424 """
435 """
425 ip = get_ipython()
436 ip = get_ipython()
426 c = ip.Completer
437 c = ip.Completer
427 ip.ex("class TestClass():\n\ta=1\n\ta1=2")
438 ip.ex("class TestClass():\n\ta=1\n\ta1=2")
428 for jedi_status in [True, False]:
439 for jedi_status in [True, False]:
429 with provisionalcompleter():
440 with provisionalcompleter():
430 ip.Completer.use_jedi = jedi_status
441 ip.Completer.use_jedi = jedi_status
431 matches = c.all_completions("TestCl")
442 matches = c.all_completions("TestCl")
432 assert matches == ["TestClass"], (jedi_status, matches)
443 assert matches == ["TestClass"], (jedi_status, matches)
433 matches = c.all_completions("TestClass.")
444 matches = c.all_completions("TestClass.")
434 assert len(matches) > 2, (jedi_status, matches)
445 assert len(matches) > 2, (jedi_status, matches)
435 matches = c.all_completions("TestClass.a")
446 matches = c.all_completions("TestClass.a")
436 assert matches == ['TestClass.a', 'TestClass.a1'], jedi_status
447 assert matches == ['TestClass.a', 'TestClass.a1'], jedi_status
437
448
438 def test_jedi(self):
449 def test_jedi(self):
439 """
450 """
440 A couple of issue we had with Jedi
451 A couple of issue we had with Jedi
441 """
452 """
442 ip = get_ipython()
453 ip = get_ipython()
443
454
444 def _test_complete(reason, s, comp, start=None, end=None):
455 def _test_complete(reason, s, comp, start=None, end=None):
445 l = len(s)
456 l = len(s)
446 start = start if start is not None else l
457 start = start if start is not None else l
447 end = end if end is not None else l
458 end = end if end is not None else l
448 with provisionalcompleter():
459 with provisionalcompleter():
449 ip.Completer.use_jedi = True
460 ip.Completer.use_jedi = True
450 completions = set(ip.Completer.completions(s, l))
461 completions = set(ip.Completer.completions(s, l))
451 ip.Completer.use_jedi = False
462 ip.Completer.use_jedi = False
452 assert Completion(start, end, comp) in completions, reason
463 assert Completion(start, end, comp) in completions, reason
453
464
454 def _test_not_complete(reason, s, comp):
465 def _test_not_complete(reason, s, comp):
455 l = len(s)
466 l = len(s)
456 with provisionalcompleter():
467 with provisionalcompleter():
457 ip.Completer.use_jedi = True
468 ip.Completer.use_jedi = True
458 completions = set(ip.Completer.completions(s, l))
469 completions = set(ip.Completer.completions(s, l))
459 ip.Completer.use_jedi = False
470 ip.Completer.use_jedi = False
460 assert Completion(l, l, comp) not in completions, reason
471 assert Completion(l, l, comp) not in completions, reason
461
472
462 import jedi
473 import jedi
463
474
464 jedi_version = tuple(int(i) for i in jedi.__version__.split(".")[:3])
475 jedi_version = tuple(int(i) for i in jedi.__version__.split(".")[:3])
465 if jedi_version > (0, 10):
476 if jedi_version > (0, 10):
466 _test_complete("jedi >0.9 should complete and not crash", "a=1;a.", "real")
477 _test_complete("jedi >0.9 should complete and not crash", "a=1;a.", "real")
467 _test_complete("can infer first argument", 'a=(1,"foo");a[0].', "real")
478 _test_complete("can infer first argument", 'a=(1,"foo");a[0].', "real")
468 _test_complete("can infer second argument", 'a=(1,"foo");a[1].', "capitalize")
479 _test_complete("can infer second argument", 'a=(1,"foo");a[1].', "capitalize")
469 _test_complete("cover duplicate completions", "im", "import", 0, 2)
480 _test_complete("cover duplicate completions", "im", "import", 0, 2)
470
481
471 _test_not_complete("does not mix types", 'a=(1,"foo");a[0].', "capitalize")
482 _test_not_complete("does not mix types", 'a=(1,"foo");a[0].', "capitalize")
472
483
473 def test_completion_have_signature(self):
484 def test_completion_have_signature(self):
474 """
485 """
475 Lets make sure jedi is capable of pulling out the signature of the function we are completing.
486 Lets make sure jedi is capable of pulling out the signature of the function we are completing.
476 """
487 """
477 ip = get_ipython()
488 ip = get_ipython()
478 with provisionalcompleter():
489 with provisionalcompleter():
479 ip.Completer.use_jedi = True
490 ip.Completer.use_jedi = True
480 completions = ip.Completer.completions("ope", 3)
491 completions = ip.Completer.completions("ope", 3)
481 c = next(completions) # should be `open`
492 c = next(completions) # should be `open`
482 ip.Completer.use_jedi = False
493 ip.Completer.use_jedi = False
483 assert "file" in c.signature, "Signature of function was not found by completer"
494 assert "file" in c.signature, "Signature of function was not found by completer"
484 assert (
495 assert (
485 "encoding" in c.signature
496 "encoding" in c.signature
486 ), "Signature of function was not found by completer"
497 ), "Signature of function was not found by completer"
487
498
488 def test_completions_have_type(self):
499 def test_completions_have_type(self):
489 """
500 """
490 Lets make sure matchers provide completion type.
501 Lets make sure matchers provide completion type.
491 """
502 """
492 ip = get_ipython()
503 ip = get_ipython()
493 with provisionalcompleter():
504 with provisionalcompleter():
494 ip.Completer.use_jedi = False
505 ip.Completer.use_jedi = False
495 completions = ip.Completer.completions("%tim", 3)
506 completions = ip.Completer.completions("%tim", 3)
496 c = next(completions) # should be `%time` or similar
507 c = next(completions) # should be `%time` or similar
497 assert c.type == "magic", "Type of magic was not assigned by completer"
508 assert c.type == "magic", "Type of magic was not assigned by completer"
498
509
499 @pytest.mark.xfail(reason="Known failure on jedi<=0.18.0")
510 @pytest.mark.xfail(reason="Known failure on jedi<=0.18.0")
500 def test_deduplicate_completions(self):
511 def test_deduplicate_completions(self):
501 """
512 """
502 Test that completions are correctly deduplicated (even if ranges are not the same)
513 Test that completions are correctly deduplicated (even if ranges are not the same)
503 """
514 """
504 ip = get_ipython()
515 ip = get_ipython()
505 ip.ex(
516 ip.ex(
506 textwrap.dedent(
517 textwrap.dedent(
507 """
518 """
508 class Z:
519 class Z:
509 zoo = 1
520 zoo = 1
510 """
521 """
511 )
522 )
512 )
523 )
513 with provisionalcompleter():
524 with provisionalcompleter():
514 ip.Completer.use_jedi = True
525 ip.Completer.use_jedi = True
515 l = list(
526 l = list(
516 _deduplicate_completions("Z.z", ip.Completer.completions("Z.z", 3))
527 _deduplicate_completions("Z.z", ip.Completer.completions("Z.z", 3))
517 )
528 )
518 ip.Completer.use_jedi = False
529 ip.Completer.use_jedi = False
519
530
520 assert len(l) == 1, "Completions (Z.z<tab>) correctly deduplicate: %s " % l
531 assert len(l) == 1, "Completions (Z.z<tab>) correctly deduplicate: %s " % l
521 assert l[0].text == "zoo" # and not `it.accumulate`
532 assert l[0].text == "zoo" # and not `it.accumulate`
522
533
523 def test_greedy_completions(self):
534 def test_greedy_completions(self):
524 """
535 """
525 Test the capability of the Greedy completer.
536 Test the capability of the Greedy completer.
526
537
527 Most of the test here does not really show off the greedy completer, for proof
538 Most of the test here does not really show off the greedy completer, for proof
528 each of the text below now pass with Jedi. The greedy completer is capable of more.
539 each of the text below now pass with Jedi. The greedy completer is capable of more.
529
540
530 See the :any:`test_dict_key_completion_contexts`
541 See the :any:`test_dict_key_completion_contexts`
531
542
532 """
543 """
533 ip = get_ipython()
544 ip = get_ipython()
534 ip.ex("a=list(range(5))")
545 ip.ex("a=list(range(5))")
535 _, c = ip.complete(".", line="a[0].")
546 _, c = ip.complete(".", line="a[0].")
536 self.assertFalse(".real" in c, "Shouldn't have completed on a[0]: %s" % c)
547 self.assertFalse(".real" in c, "Shouldn't have completed on a[0]: %s" % c)
537
548
538 def _(line, cursor_pos, expect, message, completion):
549 def _(line, cursor_pos, expect, message, completion):
539 with greedy_completion(), provisionalcompleter():
550 with greedy_completion(), provisionalcompleter():
540 ip.Completer.use_jedi = False
551 ip.Completer.use_jedi = False
541 _, c = ip.complete(".", line=line, cursor_pos=cursor_pos)
552 _, c = ip.complete(".", line=line, cursor_pos=cursor_pos)
542 self.assertIn(expect, c, message % c)
553 self.assertIn(expect, c, message % c)
543
554
544 ip.Completer.use_jedi = True
555 ip.Completer.use_jedi = True
545 with provisionalcompleter():
556 with provisionalcompleter():
546 completions = ip.Completer.completions(line, cursor_pos)
557 completions = ip.Completer.completions(line, cursor_pos)
547 self.assertIn(completion, completions)
558 self.assertIn(completion, completions)
548
559
549 with provisionalcompleter():
560 with provisionalcompleter():
550 _(
561 _(
551 "a[0].",
562 "a[0].",
552 5,
563 5,
553 "a[0].real",
564 "a[0].real",
554 "Should have completed on a[0].: %s",
565 "Should have completed on a[0].: %s",
555 Completion(5, 5, "real"),
566 Completion(5, 5, "real"),
556 )
567 )
557 _(
568 _(
558 "a[0].r",
569 "a[0].r",
559 6,
570 6,
560 "a[0].real",
571 "a[0].real",
561 "Should have completed on a[0].r: %s",
572 "Should have completed on a[0].r: %s",
562 Completion(5, 6, "real"),
573 Completion(5, 6, "real"),
563 )
574 )
564
575
565 _(
576 _(
566 "a[0].from_",
577 "a[0].from_",
567 10,
578 10,
568 "a[0].from_bytes",
579 "a[0].from_bytes",
569 "Should have completed on a[0].from_: %s",
580 "Should have completed on a[0].from_: %s",
570 Completion(5, 10, "from_bytes"),
581 Completion(5, 10, "from_bytes"),
571 )
582 )
572
583
573 def test_omit__names(self):
584 def test_omit__names(self):
574 # also happens to test IPCompleter as a configurable
585 # also happens to test IPCompleter as a configurable
575 ip = get_ipython()
586 ip = get_ipython()
576 ip._hidden_attr = 1
587 ip._hidden_attr = 1
577 ip._x = {}
588 ip._x = {}
578 c = ip.Completer
589 c = ip.Completer
579 ip.ex("ip=get_ipython()")
590 ip.ex("ip=get_ipython()")
580 cfg = Config()
591 cfg = Config()
581 cfg.IPCompleter.omit__names = 0
592 cfg.IPCompleter.omit__names = 0
582 c.update_config(cfg)
593 c.update_config(cfg)
583 with provisionalcompleter():
594 with provisionalcompleter():
584 c.use_jedi = False
595 c.use_jedi = False
585 s, matches = c.complete("ip.")
596 s, matches = c.complete("ip.")
586 self.assertIn("ip.__str__", matches)
597 self.assertIn("ip.__str__", matches)
587 self.assertIn("ip._hidden_attr", matches)
598 self.assertIn("ip._hidden_attr", matches)
588
599
589 # c.use_jedi = True
600 # c.use_jedi = True
590 # completions = set(c.completions('ip.', 3))
601 # completions = set(c.completions('ip.', 3))
591 # self.assertIn(Completion(3, 3, '__str__'), completions)
602 # self.assertIn(Completion(3, 3, '__str__'), completions)
592 # self.assertIn(Completion(3,3, "_hidden_attr"), completions)
603 # self.assertIn(Completion(3,3, "_hidden_attr"), completions)
593
604
594 cfg = Config()
605 cfg = Config()
595 cfg.IPCompleter.omit__names = 1
606 cfg.IPCompleter.omit__names = 1
596 c.update_config(cfg)
607 c.update_config(cfg)
597 with provisionalcompleter():
608 with provisionalcompleter():
598 c.use_jedi = False
609 c.use_jedi = False
599 s, matches = c.complete("ip.")
610 s, matches = c.complete("ip.")
600 self.assertNotIn("ip.__str__", matches)
611 self.assertNotIn("ip.__str__", matches)
601 # self.assertIn('ip._hidden_attr', matches)
612 # self.assertIn('ip._hidden_attr', matches)
602
613
603 # c.use_jedi = True
614 # c.use_jedi = True
604 # completions = set(c.completions('ip.', 3))
615 # completions = set(c.completions('ip.', 3))
605 # self.assertNotIn(Completion(3,3,'__str__'), completions)
616 # self.assertNotIn(Completion(3,3,'__str__'), completions)
606 # self.assertIn(Completion(3,3, "_hidden_attr"), completions)
617 # self.assertIn(Completion(3,3, "_hidden_attr"), completions)
607
618
608 cfg = Config()
619 cfg = Config()
609 cfg.IPCompleter.omit__names = 2
620 cfg.IPCompleter.omit__names = 2
610 c.update_config(cfg)
621 c.update_config(cfg)
611 with provisionalcompleter():
622 with provisionalcompleter():
612 c.use_jedi = False
623 c.use_jedi = False
613 s, matches = c.complete("ip.")
624 s, matches = c.complete("ip.")
614 self.assertNotIn("ip.__str__", matches)
625 self.assertNotIn("ip.__str__", matches)
615 self.assertNotIn("ip._hidden_attr", matches)
626 self.assertNotIn("ip._hidden_attr", matches)
616
627
617 # c.use_jedi = True
628 # c.use_jedi = True
618 # completions = set(c.completions('ip.', 3))
629 # completions = set(c.completions('ip.', 3))
619 # self.assertNotIn(Completion(3,3,'__str__'), completions)
630 # self.assertNotIn(Completion(3,3,'__str__'), completions)
620 # self.assertNotIn(Completion(3,3, "_hidden_attr"), completions)
631 # self.assertNotIn(Completion(3,3, "_hidden_attr"), completions)
621
632
622 with provisionalcompleter():
633 with provisionalcompleter():
623 c.use_jedi = False
634 c.use_jedi = False
624 s, matches = c.complete("ip._x.")
635 s, matches = c.complete("ip._x.")
625 self.assertIn("ip._x.keys", matches)
636 self.assertIn("ip._x.keys", matches)
626
637
627 # c.use_jedi = True
638 # c.use_jedi = True
628 # completions = set(c.completions('ip._x.', 6))
639 # completions = set(c.completions('ip._x.', 6))
629 # self.assertIn(Completion(6,6, "keys"), completions)
640 # self.assertIn(Completion(6,6, "keys"), completions)
630
641
631 del ip._hidden_attr
642 del ip._hidden_attr
632 del ip._x
643 del ip._x
633
644
634 def test_limit_to__all__False_ok(self):
645 def test_limit_to__all__False_ok(self):
635 """
646 """
636 Limit to all is deprecated, once we remove it this test can go away.
647 Limit to all is deprecated, once we remove it this test can go away.
637 """
648 """
638 ip = get_ipython()
649 ip = get_ipython()
639 c = ip.Completer
650 c = ip.Completer
640 c.use_jedi = False
651 c.use_jedi = False
641 ip.ex("class D: x=24")
652 ip.ex("class D: x=24")
642 ip.ex("d=D()")
653 ip.ex("d=D()")
643 cfg = Config()
654 cfg = Config()
644 cfg.IPCompleter.limit_to__all__ = False
655 cfg.IPCompleter.limit_to__all__ = False
645 c.update_config(cfg)
656 c.update_config(cfg)
646 s, matches = c.complete("d.")
657 s, matches = c.complete("d.")
647 self.assertIn("d.x", matches)
658 self.assertIn("d.x", matches)
648
659
649 def test_get__all__entries_ok(self):
660 def test_get__all__entries_ok(self):
650 class A:
661 class A:
651 __all__ = ["x", 1]
662 __all__ = ["x", 1]
652
663
653 words = completer.get__all__entries(A())
664 words = completer.get__all__entries(A())
654 self.assertEqual(words, ["x"])
665 self.assertEqual(words, ["x"])
655
666
656 def test_get__all__entries_no__all__ok(self):
667 def test_get__all__entries_no__all__ok(self):
657 class A:
668 class A:
658 pass
669 pass
659
670
660 words = completer.get__all__entries(A())
671 words = completer.get__all__entries(A())
661 self.assertEqual(words, [])
672 self.assertEqual(words, [])
662
673
663 def test_func_kw_completions(self):
674 def test_func_kw_completions(self):
664 ip = get_ipython()
675 ip = get_ipython()
665 c = ip.Completer
676 c = ip.Completer
666 c.use_jedi = False
677 c.use_jedi = False
667 ip.ex("def myfunc(a=1,b=2): return a+b")
678 ip.ex("def myfunc(a=1,b=2): return a+b")
668 s, matches = c.complete(None, "myfunc(1,b")
679 s, matches = c.complete(None, "myfunc(1,b")
669 self.assertIn("b=", matches)
680 self.assertIn("b=", matches)
670 # Simulate completing with cursor right after b (pos==10):
681 # Simulate completing with cursor right after b (pos==10):
671 s, matches = c.complete(None, "myfunc(1,b)", 10)
682 s, matches = c.complete(None, "myfunc(1,b)", 10)
672 self.assertIn("b=", matches)
683 self.assertIn("b=", matches)
673 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
684 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
674 self.assertIn("b=", matches)
685 self.assertIn("b=", matches)
675 # builtin function
686 # builtin function
676 s, matches = c.complete(None, "min(k, k")
687 s, matches = c.complete(None, "min(k, k")
677 self.assertIn("key=", matches)
688 self.assertIn("key=", matches)
678
689
679 def test_default_arguments_from_docstring(self):
690 def test_default_arguments_from_docstring(self):
680 ip = get_ipython()
691 ip = get_ipython()
681 c = ip.Completer
692 c = ip.Completer
682 kwd = c._default_arguments_from_docstring("min(iterable[, key=func]) -> value")
693 kwd = c._default_arguments_from_docstring("min(iterable[, key=func]) -> value")
683 self.assertEqual(kwd, ["key"])
694 self.assertEqual(kwd, ["key"])
684 # with cython type etc
695 # with cython type etc
685 kwd = c._default_arguments_from_docstring(
696 kwd = c._default_arguments_from_docstring(
686 "Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
697 "Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
687 )
698 )
688 self.assertEqual(kwd, ["ncall", "resume", "nsplit"])
699 self.assertEqual(kwd, ["ncall", "resume", "nsplit"])
689 # white spaces
700 # white spaces
690 kwd = c._default_arguments_from_docstring(
701 kwd = c._default_arguments_from_docstring(
691 "\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
702 "\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
692 )
703 )
693 self.assertEqual(kwd, ["ncall", "resume", "nsplit"])
704 self.assertEqual(kwd, ["ncall", "resume", "nsplit"])
694
705
695 def test_line_magics(self):
706 def test_line_magics(self):
696 ip = get_ipython()
707 ip = get_ipython()
697 c = ip.Completer
708 c = ip.Completer
698 s, matches = c.complete(None, "lsmag")
709 s, matches = c.complete(None, "lsmag")
699 self.assertIn("%lsmagic", matches)
710 self.assertIn("%lsmagic", matches)
700 s, matches = c.complete(None, "%lsmag")
711 s, matches = c.complete(None, "%lsmag")
701 self.assertIn("%lsmagic", matches)
712 self.assertIn("%lsmagic", matches)
702
713
703 def test_cell_magics(self):
714 def test_cell_magics(self):
704 from IPython.core.magic import register_cell_magic
715 from IPython.core.magic import register_cell_magic
705
716
706 @register_cell_magic
717 @register_cell_magic
707 def _foo_cellm(line, cell):
718 def _foo_cellm(line, cell):
708 pass
719 pass
709
720
710 ip = get_ipython()
721 ip = get_ipython()
711 c = ip.Completer
722 c = ip.Completer
712
723
713 s, matches = c.complete(None, "_foo_ce")
724 s, matches = c.complete(None, "_foo_ce")
714 self.assertIn("%%_foo_cellm", matches)
725 self.assertIn("%%_foo_cellm", matches)
715 s, matches = c.complete(None, "%%_foo_ce")
726 s, matches = c.complete(None, "%%_foo_ce")
716 self.assertIn("%%_foo_cellm", matches)
727 self.assertIn("%%_foo_cellm", matches)
717
728
718 def test_line_cell_magics(self):
729 def test_line_cell_magics(self):
719 from IPython.core.magic import register_line_cell_magic
730 from IPython.core.magic import register_line_cell_magic
720
731
721 @register_line_cell_magic
732 @register_line_cell_magic
722 def _bar_cellm(line, cell):
733 def _bar_cellm(line, cell):
723 pass
734 pass
724
735
725 ip = get_ipython()
736 ip = get_ipython()
726 c = ip.Completer
737 c = ip.Completer
727
738
728 # The policy here is trickier, see comments in completion code. The
739 # The policy here is trickier, see comments in completion code. The
729 # returned values depend on whether the user passes %% or not explicitly,
740 # returned values depend on whether the user passes %% or not explicitly,
730 # and this will show a difference if the same name is both a line and cell
741 # and this will show a difference if the same name is both a line and cell
731 # magic.
742 # magic.
732 s, matches = c.complete(None, "_bar_ce")
743 s, matches = c.complete(None, "_bar_ce")
733 self.assertIn("%_bar_cellm", matches)
744 self.assertIn("%_bar_cellm", matches)
734 self.assertIn("%%_bar_cellm", matches)
745 self.assertIn("%%_bar_cellm", matches)
735 s, matches = c.complete(None, "%_bar_ce")
746 s, matches = c.complete(None, "%_bar_ce")
736 self.assertIn("%_bar_cellm", matches)
747 self.assertIn("%_bar_cellm", matches)
737 self.assertIn("%%_bar_cellm", matches)
748 self.assertIn("%%_bar_cellm", matches)
738 s, matches = c.complete(None, "%%_bar_ce")
749 s, matches = c.complete(None, "%%_bar_ce")
739 self.assertNotIn("%_bar_cellm", matches)
750 self.assertNotIn("%_bar_cellm", matches)
740 self.assertIn("%%_bar_cellm", matches)
751 self.assertIn("%%_bar_cellm", matches)
741
752
742 def test_magic_completion_order(self):
753 def test_magic_completion_order(self):
743 ip = get_ipython()
754 ip = get_ipython()
744 c = ip.Completer
755 c = ip.Completer
745
756
746 # Test ordering of line and cell magics.
757 # Test ordering of line and cell magics.
747 text, matches = c.complete("timeit")
758 text, matches = c.complete("timeit")
748 self.assertEqual(matches, ["%timeit", "%%timeit"])
759 self.assertEqual(matches, ["%timeit", "%%timeit"])
749
760
750 def test_magic_completion_shadowing(self):
761 def test_magic_completion_shadowing(self):
751 ip = get_ipython()
762 ip = get_ipython()
752 c = ip.Completer
763 c = ip.Completer
753 c.use_jedi = False
764 c.use_jedi = False
754
765
755 # Before importing matplotlib, %matplotlib magic should be the only option.
766 # Before importing matplotlib, %matplotlib magic should be the only option.
756 text, matches = c.complete("mat")
767 text, matches = c.complete("mat")
757 self.assertEqual(matches, ["%matplotlib"])
768 self.assertEqual(matches, ["%matplotlib"])
758
769
759 # The newly introduced name should shadow the magic.
770 # The newly introduced name should shadow the magic.
760 ip.run_cell("matplotlib = 1")
771 ip.run_cell("matplotlib = 1")
761 text, matches = c.complete("mat")
772 text, matches = c.complete("mat")
762 self.assertEqual(matches, ["matplotlib"])
773 self.assertEqual(matches, ["matplotlib"])
763
774
764 # After removing matplotlib from namespace, the magic should again be
775 # After removing matplotlib from namespace, the magic should again be
765 # the only option.
776 # the only option.
766 del ip.user_ns["matplotlib"]
777 del ip.user_ns["matplotlib"]
767 text, matches = c.complete("mat")
778 text, matches = c.complete("mat")
768 self.assertEqual(matches, ["%matplotlib"])
779 self.assertEqual(matches, ["%matplotlib"])
769
780
770 def test_magic_completion_shadowing_explicit(self):
781 def test_magic_completion_shadowing_explicit(self):
771 """
782 """
772 If the user try to complete a shadowed magic, and explicit % start should
783 If the user try to complete a shadowed magic, and explicit % start should
773 still return the completions.
784 still return the completions.
774 """
785 """
775 ip = get_ipython()
786 ip = get_ipython()
776 c = ip.Completer
787 c = ip.Completer
777
788
778 # Before importing matplotlib, %matplotlib magic should be the only option.
789 # Before importing matplotlib, %matplotlib magic should be the only option.
779 text, matches = c.complete("%mat")
790 text, matches = c.complete("%mat")
780 self.assertEqual(matches, ["%matplotlib"])
791 self.assertEqual(matches, ["%matplotlib"])
781
792
782 ip.run_cell("matplotlib = 1")
793 ip.run_cell("matplotlib = 1")
783
794
784 # After removing matplotlib from namespace, the magic should still be
795 # After removing matplotlib from namespace, the magic should still be
785 # the only option.
796 # the only option.
786 text, matches = c.complete("%mat")
797 text, matches = c.complete("%mat")
787 self.assertEqual(matches, ["%matplotlib"])
798 self.assertEqual(matches, ["%matplotlib"])
788
799
789 def test_magic_config(self):
800 def test_magic_config(self):
790 ip = get_ipython()
801 ip = get_ipython()
791 c = ip.Completer
802 c = ip.Completer
792
803
793 s, matches = c.complete(None, "conf")
804 s, matches = c.complete(None, "conf")
794 self.assertIn("%config", matches)
805 self.assertIn("%config", matches)
795 s, matches = c.complete(None, "conf")
806 s, matches = c.complete(None, "conf")
796 self.assertNotIn("AliasManager", matches)
807 self.assertNotIn("AliasManager", matches)
797 s, matches = c.complete(None, "config ")
808 s, matches = c.complete(None, "config ")
798 self.assertIn("AliasManager", matches)
809 self.assertIn("AliasManager", matches)
799 s, matches = c.complete(None, "%config ")
810 s, matches = c.complete(None, "%config ")
800 self.assertIn("AliasManager", matches)
811 self.assertIn("AliasManager", matches)
801 s, matches = c.complete(None, "config Ali")
812 s, matches = c.complete(None, "config Ali")
802 self.assertListEqual(["AliasManager"], matches)
813 self.assertListEqual(["AliasManager"], matches)
803 s, matches = c.complete(None, "%config Ali")
814 s, matches = c.complete(None, "%config Ali")
804 self.assertListEqual(["AliasManager"], matches)
815 self.assertListEqual(["AliasManager"], matches)
805 s, matches = c.complete(None, "config AliasManager")
816 s, matches = c.complete(None, "config AliasManager")
806 self.assertListEqual(["AliasManager"], matches)
817 self.assertListEqual(["AliasManager"], matches)
807 s, matches = c.complete(None, "%config AliasManager")
818 s, matches = c.complete(None, "%config AliasManager")
808 self.assertListEqual(["AliasManager"], matches)
819 self.assertListEqual(["AliasManager"], matches)
809 s, matches = c.complete(None, "config AliasManager.")
820 s, matches = c.complete(None, "config AliasManager.")
810 self.assertIn("AliasManager.default_aliases", matches)
821 self.assertIn("AliasManager.default_aliases", matches)
811 s, matches = c.complete(None, "%config AliasManager.")
822 s, matches = c.complete(None, "%config AliasManager.")
812 self.assertIn("AliasManager.default_aliases", matches)
823 self.assertIn("AliasManager.default_aliases", matches)
813 s, matches = c.complete(None, "config AliasManager.de")
824 s, matches = c.complete(None, "config AliasManager.de")
814 self.assertListEqual(["AliasManager.default_aliases"], matches)
825 self.assertListEqual(["AliasManager.default_aliases"], matches)
815 s, matches = c.complete(None, "config AliasManager.de")
826 s, matches = c.complete(None, "config AliasManager.de")
816 self.assertListEqual(["AliasManager.default_aliases"], matches)
827 self.assertListEqual(["AliasManager.default_aliases"], matches)
817
828
818 def test_magic_color(self):
829 def test_magic_color(self):
819 ip = get_ipython()
830 ip = get_ipython()
820 c = ip.Completer
831 c = ip.Completer
821
832
822 s, matches = c.complete(None, "colo")
833 s, matches = c.complete(None, "colo")
823 self.assertIn("%colors", matches)
834 self.assertIn("%colors", matches)
824 s, matches = c.complete(None, "colo")
835 s, matches = c.complete(None, "colo")
825 self.assertNotIn("NoColor", matches)
836 self.assertNotIn("NoColor", matches)
826 s, matches = c.complete(None, "%colors") # No trailing space
837 s, matches = c.complete(None, "%colors") # No trailing space
827 self.assertNotIn("NoColor", matches)
838 self.assertNotIn("NoColor", matches)
828 s, matches = c.complete(None, "colors ")
839 s, matches = c.complete(None, "colors ")
829 self.assertIn("NoColor", matches)
840 self.assertIn("NoColor", matches)
830 s, matches = c.complete(None, "%colors ")
841 s, matches = c.complete(None, "%colors ")
831 self.assertIn("NoColor", matches)
842 self.assertIn("NoColor", matches)
832 s, matches = c.complete(None, "colors NoCo")
843 s, matches = c.complete(None, "colors NoCo")
833 self.assertListEqual(["NoColor"], matches)
844 self.assertListEqual(["NoColor"], matches)
834 s, matches = c.complete(None, "%colors NoCo")
845 s, matches = c.complete(None, "%colors NoCo")
835 self.assertListEqual(["NoColor"], matches)
846 self.assertListEqual(["NoColor"], matches)
836
847
837 def test_match_dict_keys(self):
848 def test_match_dict_keys(self):
838 """
849 """
839 Test that match_dict_keys works on a couple of use case does return what
850 Test that match_dict_keys works on a couple of use case does return what
840 expected, and does not crash
851 expected, and does not crash
841 """
852 """
842 delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
853 delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
843
854
844 keys = ["foo", b"far"]
855 keys = ["foo", b"far"]
845 assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2, ["far"])
856 assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2, ["far"])
846 assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2, ["far"])
857 assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2, ["far"])
847 assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2, ["far"])
858 assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2, ["far"])
848 assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2, ["far"])
859 assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2, ["far"])
849
860
850 assert match_dict_keys(keys, "'", delims=delims) == ("'", 1, ["foo"])
861 assert match_dict_keys(keys, "'", delims=delims) == ("'", 1, ["foo"])
851 assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1, ["foo"])
862 assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1, ["foo"])
852 assert match_dict_keys(keys, '"', delims=delims) == ('"', 1, ["foo"])
863 assert match_dict_keys(keys, '"', delims=delims) == ('"', 1, ["foo"])
853 assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1, ["foo"])
864 assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1, ["foo"])
854
865
855 match_dict_keys
856
857 def test_match_dict_keys_tuple(self):
866 def test_match_dict_keys_tuple(self):
858 """
867 """
859 Test that match_dict_keys called with extra prefix works on a couple of use case,
868 Test that match_dict_keys called with extra prefix works on a couple of use case,
860 does return what expected, and does not crash.
869 does return what expected, and does not crash.
861 """
870 """
862 delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
871 delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
863
872
864 keys = [("foo", "bar"), ("foo", "oof"), ("foo", b"bar"), ('other', 'test')]
873 keys = [("foo", "bar"), ("foo", "oof"), ("foo", b"bar"), ('other', 'test')]
865
874
866 # Completion on first key == "foo"
875 # Completion on first key == "foo"
867 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["bar", "oof"])
876 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["bar", "oof"])
868 assert match_dict_keys(keys, "\"", delims=delims, extra_prefix=("foo",)) == ("\"", 1, ["bar", "oof"])
877 assert match_dict_keys(keys, "\"", delims=delims, extra_prefix=("foo",)) == ("\"", 1, ["bar", "oof"])
869 assert match_dict_keys(keys, "'o", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["oof"])
878 assert match_dict_keys(keys, "'o", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["oof"])
870 assert match_dict_keys(keys, "\"o", delims=delims, extra_prefix=("foo",)) == ("\"", 1, ["oof"])
879 assert match_dict_keys(keys, "\"o", delims=delims, extra_prefix=("foo",)) == ("\"", 1, ["oof"])
871 assert match_dict_keys(keys, "b'", delims=delims, extra_prefix=("foo",)) == ("'", 2, ["bar"])
880 assert match_dict_keys(keys, "b'", delims=delims, extra_prefix=("foo",)) == ("'", 2, ["bar"])
872 assert match_dict_keys(keys, "b\"", delims=delims, extra_prefix=("foo",)) == ("\"", 2, ["bar"])
881 assert match_dict_keys(keys, "b\"", delims=delims, extra_prefix=("foo",)) == ("\"", 2, ["bar"])
873 assert match_dict_keys(keys, "b'b", delims=delims, extra_prefix=("foo",)) == ("'", 2, ["bar"])
882 assert match_dict_keys(keys, "b'b", delims=delims, extra_prefix=("foo",)) == ("'", 2, ["bar"])
874 assert match_dict_keys(keys, "b\"b", delims=delims, extra_prefix=("foo",)) == ("\"", 2, ["bar"])
883 assert match_dict_keys(keys, "b\"b", delims=delims, extra_prefix=("foo",)) == ("\"", 2, ["bar"])
875
884
876 # No Completion
885 # No Completion
877 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("no_foo",)) == ("'", 1, [])
886 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("no_foo",)) == ("'", 1, [])
878 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("fo",)) == ("'", 1, [])
887 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("fo",)) == ("'", 1, [])
879
888
880 keys = [('foo1', 'foo2', 'foo3', 'foo4'), ('foo1', 'foo2', 'bar', 'foo4')]
889 keys = [('foo1', 'foo2', 'foo3', 'foo4'), ('foo1', 'foo2', 'bar', 'foo4')]
881 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1',)) == ("'", 1, ["foo2", "foo2"])
890 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1',)) == ("'", 1, ["foo2", "foo2"])
882 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2')) == ("'", 1, ["foo3"])
891 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2')) == ("'", 1, ["foo3"])
883 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3')) == ("'", 1, ["foo4"])
892 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3')) == ("'", 1, ["foo4"])
884 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3', 'foo4')) == ("'", 1, [])
893 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3', 'foo4')) == ("'", 1, [])
885
894
895 keys = [("foo", 1111), ("foo", 2222), (3333, "bar"), (3333, 'test')]
896 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["1111", "2222"])
897 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=(3333,)) == ("'", 1, ["bar", "test"])
898 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("3333",)) == ("'", 1, [])
899
886 def test_dict_key_completion_string(self):
900 def test_dict_key_completion_string(self):
887 """Test dictionary key completion for string keys"""
901 """Test dictionary key completion for string keys"""
888 ip = get_ipython()
902 ip = get_ipython()
889 complete = ip.Completer.complete
903 complete = ip.Completer.complete
890
904
891 ip.user_ns["d"] = {"abc": None}
905 ip.user_ns["d"] = {"abc": None}
892
906
893 # check completion at different stages
907 # check completion at different stages
894 _, matches = complete(line_buffer="d[")
908 _, matches = complete(line_buffer="d[")
895 self.assertIn("'abc'", matches)
909 self.assertIn("'abc'", matches)
896 self.assertNotIn("'abc']", matches)
910 self.assertNotIn("'abc']", matches)
897
911
898 _, matches = complete(line_buffer="d['")
912 _, matches = complete(line_buffer="d['")
899 self.assertIn("abc", matches)
913 self.assertIn("abc", matches)
900 self.assertNotIn("abc']", matches)
914 self.assertNotIn("abc']", matches)
901
915
902 _, matches = complete(line_buffer="d['a")
916 _, matches = complete(line_buffer="d['a")
903 self.assertIn("abc", matches)
917 self.assertIn("abc", matches)
904 self.assertNotIn("abc']", matches)
918 self.assertNotIn("abc']", matches)
905
919
906 # check use of different quoting
920 # check use of different quoting
907 _, matches = complete(line_buffer='d["')
921 _, matches = complete(line_buffer='d["')
908 self.assertIn("abc", matches)
922 self.assertIn("abc", matches)
909 self.assertNotIn('abc"]', matches)
923 self.assertNotIn('abc"]', matches)
910
924
911 _, matches = complete(line_buffer='d["a')
925 _, matches = complete(line_buffer='d["a')
912 self.assertIn("abc", matches)
926 self.assertIn("abc", matches)
913 self.assertNotIn('abc"]', matches)
927 self.assertNotIn('abc"]', matches)
914
928
915 # check sensitivity to following context
929 # check sensitivity to following context
916 _, matches = complete(line_buffer="d[]", cursor_pos=2)
930 _, matches = complete(line_buffer="d[]", cursor_pos=2)
917 self.assertIn("'abc'", matches)
931 self.assertIn("'abc'", matches)
918
932
919 _, matches = complete(line_buffer="d['']", cursor_pos=3)
933 _, matches = complete(line_buffer="d['']", cursor_pos=3)
920 self.assertIn("abc", matches)
934 self.assertIn("abc", matches)
921 self.assertNotIn("abc'", matches)
935 self.assertNotIn("abc'", matches)
922 self.assertNotIn("abc']", matches)
936 self.assertNotIn("abc']", matches)
923
937
924 # check multiple solutions are correctly returned and that noise is not
938 # check multiple solutions are correctly returned and that noise is not
925 ip.user_ns["d"] = {
939 ip.user_ns["d"] = {
926 "abc": None,
940 "abc": None,
927 "abd": None,
941 "abd": None,
928 "bad": None,
942 "bad": None,
929 object(): None,
943 object(): None,
930 5: None,
944 5: None,
931 ("abe", None): None,
945 ("abe", None): None,
932 (None, "abf"): None
946 (None, "abf"): None
933 }
947 }
934
948
935 _, matches = complete(line_buffer="d['a")
949 _, matches = complete(line_buffer="d['a")
936 self.assertIn("abc", matches)
950 self.assertIn("abc", matches)
937 self.assertIn("abd", matches)
951 self.assertIn("abd", matches)
938 self.assertNotIn("bad", matches)
952 self.assertNotIn("bad", matches)
939 self.assertNotIn("abe", matches)
953 self.assertNotIn("abe", matches)
940 self.assertNotIn("abf", matches)
954 self.assertNotIn("abf", matches)
941 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
955 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
942
956
943 # check escaping and whitespace
957 # check escaping and whitespace
944 ip.user_ns["d"] = {"a\nb": None, "a'b": None, 'a"b': None, "a word": None}
958 ip.user_ns["d"] = {"a\nb": None, "a'b": None, 'a"b': None, "a word": None}
945 _, matches = complete(line_buffer="d['a")
959 _, matches = complete(line_buffer="d['a")
946 self.assertIn("a\\nb", matches)
960 self.assertIn("a\\nb", matches)
947 self.assertIn("a\\'b", matches)
961 self.assertIn("a\\'b", matches)
948 self.assertIn('a"b', matches)
962 self.assertIn('a"b', matches)
949 self.assertIn("a word", matches)
963 self.assertIn("a word", matches)
950 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
964 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
951
965
952 # - can complete on non-initial word of the string
966 # - can complete on non-initial word of the string
953 _, matches = complete(line_buffer="d['a w")
967 _, matches = complete(line_buffer="d['a w")
954 self.assertIn("word", matches)
968 self.assertIn("word", matches)
955
969
956 # - understands quote escaping
970 # - understands quote escaping
957 _, matches = complete(line_buffer="d['a\\'")
971 _, matches = complete(line_buffer="d['a\\'")
958 self.assertIn("b", matches)
972 self.assertIn("b", matches)
959
973
960 # - default quoting should work like repr
974 # - default quoting should work like repr
961 _, matches = complete(line_buffer="d[")
975 _, matches = complete(line_buffer="d[")
962 self.assertIn('"a\'b"', matches)
976 self.assertIn('"a\'b"', matches)
963
977
964 # - when opening quote with ", possible to match with unescaped apostrophe
978 # - when opening quote with ", possible to match with unescaped apostrophe
965 _, matches = complete(line_buffer="d[\"a'")
979 _, matches = complete(line_buffer="d[\"a'")
966 self.assertIn("b", matches)
980 self.assertIn("b", matches)
967
981
968 # need to not split at delims that readline won't split at
982 # need to not split at delims that readline won't split at
969 if "-" not in ip.Completer.splitter.delims:
983 if "-" not in ip.Completer.splitter.delims:
970 ip.user_ns["d"] = {"before-after": None}
984 ip.user_ns["d"] = {"before-after": None}
971 _, matches = complete(line_buffer="d['before-af")
985 _, matches = complete(line_buffer="d['before-af")
972 self.assertIn("before-after", matches)
986 self.assertIn("before-after", matches)
973
987
974 # check completion on tuple-of-string keys at different stage - on first key
988 # check completion on tuple-of-string keys at different stage - on first key
975 ip.user_ns["d"] = {('foo', 'bar'): None}
989 ip.user_ns["d"] = {('foo', 'bar'): None}
976 _, matches = complete(line_buffer="d[")
990 _, matches = complete(line_buffer="d[")
977 self.assertIn("'foo'", matches)
991 self.assertIn("'foo'", matches)
978 self.assertNotIn("'foo']", matches)
992 self.assertNotIn("'foo']", matches)
979 self.assertNotIn("'bar'", matches)
993 self.assertNotIn("'bar'", matches)
980 self.assertNotIn("foo", matches)
994 self.assertNotIn("foo", matches)
981 self.assertNotIn("bar", matches)
995 self.assertNotIn("bar", matches)
982
996
983 # - match the prefix
997 # - match the prefix
984 _, matches = complete(line_buffer="d['f")
998 _, matches = complete(line_buffer="d['f")
985 self.assertIn("foo", matches)
999 self.assertIn("foo", matches)
986 self.assertNotIn("foo']", matches)
1000 self.assertNotIn("foo']", matches)
987 self.assertNotIn('foo"]', matches)
1001 self.assertNotIn('foo"]', matches)
988 _, matches = complete(line_buffer="d['foo")
1002 _, matches = complete(line_buffer="d['foo")
989 self.assertIn("foo", matches)
1003 self.assertIn("foo", matches)
990
1004
991 # - can complete on second key
1005 # - can complete on second key
992 _, matches = complete(line_buffer="d['foo', ")
1006 _, matches = complete(line_buffer="d['foo', ")
993 self.assertIn("'bar'", matches)
1007 self.assertIn("'bar'", matches)
994 _, matches = complete(line_buffer="d['foo', 'b")
1008 _, matches = complete(line_buffer="d['foo', 'b")
995 self.assertIn("bar", matches)
1009 self.assertIn("bar", matches)
996 self.assertNotIn("foo", matches)
1010 self.assertNotIn("foo", matches)
997
1011
998 # - does not propose missing keys
1012 # - does not propose missing keys
999 _, matches = complete(line_buffer="d['foo', 'f")
1013 _, matches = complete(line_buffer="d['foo', 'f")
1000 self.assertNotIn("bar", matches)
1014 self.assertNotIn("bar", matches)
1001 self.assertNotIn("foo", matches)
1015 self.assertNotIn("foo", matches)
1002
1016
1003 # check sensitivity to following context
1017 # check sensitivity to following context
1004 _, matches = complete(line_buffer="d['foo',]", cursor_pos=8)
1018 _, matches = complete(line_buffer="d['foo',]", cursor_pos=8)
1005 self.assertIn("'bar'", matches)
1019 self.assertIn("'bar'", matches)
1006 self.assertNotIn("bar", matches)
1020 self.assertNotIn("bar", matches)
1007 self.assertNotIn("'foo'", matches)
1021 self.assertNotIn("'foo'", matches)
1008 self.assertNotIn("foo", matches)
1022 self.assertNotIn("foo", matches)
1009
1023
1010 _, matches = complete(line_buffer="d['']", cursor_pos=3)
1024 _, matches = complete(line_buffer="d['']", cursor_pos=3)
1011 self.assertIn("foo", matches)
1025 self.assertIn("foo", matches)
1012 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
1026 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
1013
1027
1014 _, matches = complete(line_buffer='d[""]', cursor_pos=3)
1028 _, matches = complete(line_buffer='d[""]', cursor_pos=3)
1015 self.assertIn("foo", matches)
1029 self.assertIn("foo", matches)
1016 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
1030 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
1017
1031
1018 _, matches = complete(line_buffer='d["foo","]', cursor_pos=9)
1032 _, matches = complete(line_buffer='d["foo","]', cursor_pos=9)
1019 self.assertIn("bar", matches)
1033 self.assertIn("bar", matches)
1020 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
1034 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
1021
1035
1022 _, matches = complete(line_buffer='d["foo",]', cursor_pos=8)
1036 _, matches = complete(line_buffer='d["foo",]', cursor_pos=8)
1023 self.assertIn("'bar'", matches)
1037 self.assertIn("'bar'", matches)
1024 self.assertNotIn("bar", matches)
1038 self.assertNotIn("bar", matches)
1025
1039
1026 # Can complete with longer tuple keys
1040 # Can complete with longer tuple keys
1027 ip.user_ns["d"] = {('foo', 'bar', 'foobar'): None}
1041 ip.user_ns["d"] = {('foo', 'bar', 'foobar'): None}
1028
1042
1029 # - can complete second key
1043 # - can complete second key
1030 _, matches = complete(line_buffer="d['foo', 'b")
1044 _, matches = complete(line_buffer="d['foo', 'b")
1031 self.assertIn("bar", matches)
1045 self.assertIn("bar", matches)
1032 self.assertNotIn("foo", matches)
1046 self.assertNotIn("foo", matches)
1033 self.assertNotIn("foobar", matches)
1047 self.assertNotIn("foobar", matches)
1034
1048
1035 # - can complete third key
1049 # - can complete third key
1036 _, matches = complete(line_buffer="d['foo', 'bar', 'fo")
1050 _, matches = complete(line_buffer="d['foo', 'bar', 'fo")
1037 self.assertIn("foobar", matches)
1051 self.assertIn("foobar", matches)
1038 self.assertNotIn("foo", matches)
1052 self.assertNotIn("foo", matches)
1039 self.assertNotIn("bar", matches)
1053 self.assertNotIn("bar", matches)
1040
1054
1041 def test_dict_key_completion_contexts(self):
1055 def test_dict_key_completion_contexts(self):
1042 """Test expression contexts in which dict key completion occurs"""
1056 """Test expression contexts in which dict key completion occurs"""
1043 ip = get_ipython()
1057 ip = get_ipython()
1044 complete = ip.Completer.complete
1058 complete = ip.Completer.complete
1045 d = {"abc": None}
1059 d = {"abc": None}
1046 ip.user_ns["d"] = d
1060 ip.user_ns["d"] = d
1047
1061
1048 class C:
1062 class C:
1049 data = d
1063 data = d
1050
1064
1051 ip.user_ns["C"] = C
1065 ip.user_ns["C"] = C
1052 ip.user_ns["get"] = lambda: d
1066 ip.user_ns["get"] = lambda: d
1067 ip.user_ns["nested"] = {'x': d}
1053
1068
1054 def assert_no_completion(**kwargs):
1069 def assert_no_completion(**kwargs):
1055 _, matches = complete(**kwargs)
1070 _, matches = complete(**kwargs)
1056 self.assertNotIn("abc", matches)
1071 self.assertNotIn("abc", matches)
1057 self.assertNotIn("abc'", matches)
1072 self.assertNotIn("abc'", matches)
1058 self.assertNotIn("abc']", matches)
1073 self.assertNotIn("abc']", matches)
1059 self.assertNotIn("'abc'", matches)
1074 self.assertNotIn("'abc'", matches)
1060 self.assertNotIn("'abc']", matches)
1075 self.assertNotIn("'abc']", matches)
1061
1076
1062 def assert_completion(**kwargs):
1077 def assert_completion(**kwargs):
1063 _, matches = complete(**kwargs)
1078 _, matches = complete(**kwargs)
1064 self.assertIn("'abc'", matches)
1079 self.assertIn("'abc'", matches)
1065 self.assertNotIn("'abc']", matches)
1080 self.assertNotIn("'abc']", matches)
1066
1081
1067 # no completion after string closed, even if reopened
1082 # no completion after string closed, even if reopened
1068 assert_no_completion(line_buffer="d['a'")
1083 assert_no_completion(line_buffer="d['a'")
1069 assert_no_completion(line_buffer='d["a"')
1084 assert_no_completion(line_buffer='d["a"')
1070 assert_no_completion(line_buffer="d['a' + ")
1085 assert_no_completion(line_buffer="d['a' + ")
1071 assert_no_completion(line_buffer="d['a' + '")
1086 assert_no_completion(line_buffer="d['a' + '")
1072
1087
1073 # completion in non-trivial expressions
1088 # completion in non-trivial expressions
1074 assert_completion(line_buffer="+ d[")
1089 assert_completion(line_buffer="+ d[")
1075 assert_completion(line_buffer="(d[")
1090 assert_completion(line_buffer="(d[")
1076 assert_completion(line_buffer="C.data[")
1091 assert_completion(line_buffer="C.data[")
1077
1092
1093 # nested dict completion
1094 assert_completion(line_buffer="nested['x'][")
1095
1096 with evaluation_level('minimal'):
1097 with pytest.raises(AssertionError):
1098 assert_completion(line_buffer="nested['x'][")
1099
1078 # greedy flag
1100 # greedy flag
1079 def assert_completion(**kwargs):
1101 def assert_completion(**kwargs):
1080 _, matches = complete(**kwargs)
1102 _, matches = complete(**kwargs)
1081 self.assertIn("get()['abc']", matches)
1103 self.assertIn("get()['abc']", matches)
1082
1104
1083 assert_no_completion(line_buffer="get()[")
1105 assert_no_completion(line_buffer="get()[")
1084 with greedy_completion():
1106 with greedy_completion():
1085 assert_completion(line_buffer="get()[")
1107 assert_completion(line_buffer="get()[")
1086 assert_completion(line_buffer="get()['")
1108 assert_completion(line_buffer="get()['")
1087 assert_completion(line_buffer="get()['a")
1109 assert_completion(line_buffer="get()['a")
1088 assert_completion(line_buffer="get()['ab")
1110 assert_completion(line_buffer="get()['ab")
1089 assert_completion(line_buffer="get()['abc")
1111 assert_completion(line_buffer="get()['abc")
1090
1112
1091 def test_dict_key_completion_bytes(self):
1113 def test_dict_key_completion_bytes(self):
1092 """Test handling of bytes in dict key completion"""
1114 """Test handling of bytes in dict key completion"""
1093 ip = get_ipython()
1115 ip = get_ipython()
1094 complete = ip.Completer.complete
1116 complete = ip.Completer.complete
1095
1117
1096 ip.user_ns["d"] = {"abc": None, b"abd": None}
1118 ip.user_ns["d"] = {"abc": None, b"abd": None}
1097
1119
1098 _, matches = complete(line_buffer="d[")
1120 _, matches = complete(line_buffer="d[")
1099 self.assertIn("'abc'", matches)
1121 self.assertIn("'abc'", matches)
1100 self.assertIn("b'abd'", matches)
1122 self.assertIn("b'abd'", matches)
1101
1123
1102 if False: # not currently implemented
1124 if False: # not currently implemented
1103 _, matches = complete(line_buffer="d[b")
1125 _, matches = complete(line_buffer="d[b")
1104 self.assertIn("b'abd'", matches)
1126 self.assertIn("b'abd'", matches)
1105 self.assertNotIn("b'abc'", matches)
1127 self.assertNotIn("b'abc'", matches)
1106
1128
1107 _, matches = complete(line_buffer="d[b'")
1129 _, matches = complete(line_buffer="d[b'")
1108 self.assertIn("abd", matches)
1130 self.assertIn("abd", matches)
1109 self.assertNotIn("abc", matches)
1131 self.assertNotIn("abc", matches)
1110
1132
1111 _, matches = complete(line_buffer="d[B'")
1133 _, matches = complete(line_buffer="d[B'")
1112 self.assertIn("abd", matches)
1134 self.assertIn("abd", matches)
1113 self.assertNotIn("abc", matches)
1135 self.assertNotIn("abc", matches)
1114
1136
1115 _, matches = complete(line_buffer="d['")
1137 _, matches = complete(line_buffer="d['")
1116 self.assertIn("abc", matches)
1138 self.assertIn("abc", matches)
1117 self.assertNotIn("abd", matches)
1139 self.assertNotIn("abd", matches)
1118
1140
1119 def test_dict_key_completion_unicode_py3(self):
1141 def test_dict_key_completion_unicode_py3(self):
1120 """Test handling of unicode in dict key completion"""
1142 """Test handling of unicode in dict key completion"""
1121 ip = get_ipython()
1143 ip = get_ipython()
1122 complete = ip.Completer.complete
1144 complete = ip.Completer.complete
1123
1145
1124 ip.user_ns["d"] = {"a\u05d0": None}
1146 ip.user_ns["d"] = {"a\u05d0": None}
1125
1147
1126 # query using escape
1148 # query using escape
1127 if sys.platform != "win32":
1149 if sys.platform != "win32":
1128 # Known failure on Windows
1150 # Known failure on Windows
1129 _, matches = complete(line_buffer="d['a\\u05d0")
1151 _, matches = complete(line_buffer="d['a\\u05d0")
1130 self.assertIn("u05d0", matches) # tokenized after \\
1152 self.assertIn("u05d0", matches) # tokenized after \\
1131
1153
1132 # query using character
1154 # query using character
1133 _, matches = complete(line_buffer="d['a\u05d0")
1155 _, matches = complete(line_buffer="d['a\u05d0")
1134 self.assertIn("a\u05d0", matches)
1156 self.assertIn("a\u05d0", matches)
1135
1157
1136 with greedy_completion():
1158 with greedy_completion():
1137 # query using escape
1159 # query using escape
1138 _, matches = complete(line_buffer="d['a\\u05d0")
1160 _, matches = complete(line_buffer="d['a\\u05d0")
1139 self.assertIn("d['a\\u05d0']", matches) # tokenized after \\
1161 self.assertIn("d['a\\u05d0']", matches) # tokenized after \\
1140
1162
1141 # query using character
1163 # query using character
1142 _, matches = complete(line_buffer="d['a\u05d0")
1164 _, matches = complete(line_buffer="d['a\u05d0")
1143 self.assertIn("d['a\u05d0']", matches)
1165 self.assertIn("d['a\u05d0']", matches)
1144
1166
1145 @dec.skip_without("numpy")
1167 @dec.skip_without("numpy")
1146 def test_struct_array_key_completion(self):
1168 def test_struct_array_key_completion(self):
1147 """Test dict key completion applies to numpy struct arrays"""
1169 """Test dict key completion applies to numpy struct arrays"""
1148 import numpy
1170 import numpy
1149
1171
1150 ip = get_ipython()
1172 ip = get_ipython()
1151 complete = ip.Completer.complete
1173 complete = ip.Completer.complete
1152 ip.user_ns["d"] = numpy.array([], dtype=[("hello", "f"), ("world", "f")])
1174 ip.user_ns["d"] = numpy.array([], dtype=[("hello", "f"), ("world", "f")])
1153 _, matches = complete(line_buffer="d['")
1175 _, matches = complete(line_buffer="d['")
1154 self.assertIn("hello", matches)
1176 self.assertIn("hello", matches)
1155 self.assertIn("world", matches)
1177 self.assertIn("world", matches)
1156 # complete on the numpy struct itself
1178 # complete on the numpy struct itself
1157 dt = numpy.dtype(
1179 dt = numpy.dtype(
1158 [("my_head", [("my_dt", ">u4"), ("my_df", ">u4")]), ("my_data", ">f4", 5)]
1180 [("my_head", [("my_dt", ">u4"), ("my_df", ">u4")]), ("my_data", ">f4", 5)]
1159 )
1181 )
1160 x = numpy.zeros(2, dtype=dt)
1182 x = numpy.zeros(2, dtype=dt)
1161 ip.user_ns["d"] = x[1]
1183 ip.user_ns["d"] = x[1]
1162 _, matches = complete(line_buffer="d['")
1184 _, matches = complete(line_buffer="d['")
1163 self.assertIn("my_head", matches)
1185 self.assertIn("my_head", matches)
1164 self.assertIn("my_data", matches)
1186 self.assertIn("my_data", matches)
1165 # complete on a nested level
1187 def completes_on_nested():
1166 with greedy_completion():
1167 ip.user_ns["d"] = numpy.zeros(2, dtype=dt)
1188 ip.user_ns["d"] = numpy.zeros(2, dtype=dt)
1168 _, matches = complete(line_buffer="d[1]['my_head']['")
1189 _, matches = complete(line_buffer="d[1]['my_head']['")
1169 self.assertTrue(any(["my_dt" in m for m in matches]))
1190 self.assertTrue(any(["my_dt" in m for m in matches]))
1170 self.assertTrue(any(["my_df" in m for m in matches]))
1191 self.assertTrue(any(["my_df" in m for m in matches]))
1192 # complete on a nested level
1193 with greedy_completion():
1194 completes_on_nested()
1195
1196 with evaluation_level('limitted'):
1197 completes_on_nested()
1198
1199 with evaluation_level('minimal'):
1200 with pytest.raises(AssertionError):
1201 completes_on_nested()
1171
1202
1172 @dec.skip_without("pandas")
1203 @dec.skip_without("pandas")
1173 def test_dataframe_key_completion(self):
1204 def test_dataframe_key_completion(self):
1174 """Test dict key completion applies to pandas DataFrames"""
1205 """Test dict key completion applies to pandas DataFrames"""
1175 import pandas
1206 import pandas
1176
1207
1177 ip = get_ipython()
1208 ip = get_ipython()
1178 complete = ip.Completer.complete
1209 complete = ip.Completer.complete
1179 ip.user_ns["d"] = pandas.DataFrame({"hello": [1], "world": [2]})
1210 ip.user_ns["d"] = pandas.DataFrame({"hello": [1], "world": [2]})
1180 _, matches = complete(line_buffer="d['")
1211 _, matches = complete(line_buffer="d['")
1181 self.assertIn("hello", matches)
1212 self.assertIn("hello", matches)
1182 self.assertIn("world", matches)
1213 self.assertIn("world", matches)
1214 _, matches = complete(line_buffer="d.loc[:, '")
1215 self.assertIn("hello", matches)
1216 self.assertIn("world", matches)
1217 _, matches = complete(line_buffer="d.loc[1:, '")
1218 self.assertIn("hello", matches)
1219 _, matches = complete(line_buffer="d.loc[1:1, '")
1220 self.assertIn("hello", matches)
1221 _, matches = complete(line_buffer="d.loc[1:1:-1, '")
1222 self.assertIn("hello", matches)
1223 _, matches = complete(line_buffer="d.loc[::, '")
1224 self.assertIn("hello", matches)
1183
1225
1184 def test_dict_key_completion_invalids(self):
1226 def test_dict_key_completion_invalids(self):
1185 """Smoke test cases dict key completion can't handle"""
1227 """Smoke test cases dict key completion can't handle"""
1186 ip = get_ipython()
1228 ip = get_ipython()
1187 complete = ip.Completer.complete
1229 complete = ip.Completer.complete
1188
1230
1189 ip.user_ns["no_getitem"] = None
1231 ip.user_ns["no_getitem"] = None
1190 ip.user_ns["no_keys"] = []
1232 ip.user_ns["no_keys"] = []
1191 ip.user_ns["cant_call_keys"] = dict
1233 ip.user_ns["cant_call_keys"] = dict
1192 ip.user_ns["empty"] = {}
1234 ip.user_ns["empty"] = {}
1193 ip.user_ns["d"] = {"abc": 5}
1235 ip.user_ns["d"] = {"abc": 5}
1194
1236
1195 _, matches = complete(line_buffer="no_getitem['")
1237 _, matches = complete(line_buffer="no_getitem['")
1196 _, matches = complete(line_buffer="no_keys['")
1238 _, matches = complete(line_buffer="no_keys['")
1197 _, matches = complete(line_buffer="cant_call_keys['")
1239 _, matches = complete(line_buffer="cant_call_keys['")
1198 _, matches = complete(line_buffer="empty['")
1240 _, matches = complete(line_buffer="empty['")
1199 _, matches = complete(line_buffer="name_error['")
1241 _, matches = complete(line_buffer="name_error['")
1200 _, matches = complete(line_buffer="d['\\") # incomplete escape
1242 _, matches = complete(line_buffer="d['\\") # incomplete escape
1201
1243
1202 def test_object_key_completion(self):
1244 def test_object_key_completion(self):
1203 ip = get_ipython()
1245 ip = get_ipython()
1204 ip.user_ns["key_completable"] = KeyCompletable(["qwerty", "qwick"])
1246 ip.user_ns["key_completable"] = KeyCompletable(["qwerty", "qwick"])
1205
1247
1206 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
1248 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
1207 self.assertIn("qwerty", matches)
1249 self.assertIn("qwerty", matches)
1208 self.assertIn("qwick", matches)
1250 self.assertIn("qwick", matches)
1209
1251
1210 def test_class_key_completion(self):
1252 def test_class_key_completion(self):
1211 ip = get_ipython()
1253 ip = get_ipython()
1212 NamedInstanceClass("qwerty")
1254 NamedInstanceClass("qwerty")
1213 NamedInstanceClass("qwick")
1255 NamedInstanceClass("qwick")
1214 ip.user_ns["named_instance_class"] = NamedInstanceClass
1256 ip.user_ns["named_instance_class"] = NamedInstanceClass
1215
1257
1216 _, matches = ip.Completer.complete(line_buffer="named_instance_class['qw")
1258 _, matches = ip.Completer.complete(line_buffer="named_instance_class['qw")
1217 self.assertIn("qwerty", matches)
1259 self.assertIn("qwerty", matches)
1218 self.assertIn("qwick", matches)
1260 self.assertIn("qwick", matches)
1219
1261
1220 def test_tryimport(self):
1262 def test_tryimport(self):
1221 """
1263 """
1222 Test that try-import don't crash on trailing dot, and import modules before
1264 Test that try-import don't crash on trailing dot, and import modules before
1223 """
1265 """
1224 from IPython.core.completerlib import try_import
1266 from IPython.core.completerlib import try_import
1225
1267
1226 assert try_import("IPython.")
1268 assert try_import("IPython.")
1227
1269
1228 def test_aimport_module_completer(self):
1270 def test_aimport_module_completer(self):
1229 ip = get_ipython()
1271 ip = get_ipython()
1230 _, matches = ip.complete("i", "%aimport i")
1272 _, matches = ip.complete("i", "%aimport i")
1231 self.assertIn("io", matches)
1273 self.assertIn("io", matches)
1232 self.assertNotIn("int", matches)
1274 self.assertNotIn("int", matches)
1233
1275
1234 def test_nested_import_module_completer(self):
1276 def test_nested_import_module_completer(self):
1235 ip = get_ipython()
1277 ip = get_ipython()
1236 _, matches = ip.complete(None, "import IPython.co", 17)
1278 _, matches = ip.complete(None, "import IPython.co", 17)
1237 self.assertIn("IPython.core", matches)
1279 self.assertIn("IPython.core", matches)
1238 self.assertNotIn("import IPython.core", matches)
1280 self.assertNotIn("import IPython.core", matches)
1239 self.assertNotIn("IPython.display", matches)
1281 self.assertNotIn("IPython.display", matches)
1240
1282
1241 def test_import_module_completer(self):
1283 def test_import_module_completer(self):
1242 ip = get_ipython()
1284 ip = get_ipython()
1243 _, matches = ip.complete("i", "import i")
1285 _, matches = ip.complete("i", "import i")
1244 self.assertIn("io", matches)
1286 self.assertIn("io", matches)
1245 self.assertNotIn("int", matches)
1287 self.assertNotIn("int", matches)
1246
1288
1247 def test_from_module_completer(self):
1289 def test_from_module_completer(self):
1248 ip = get_ipython()
1290 ip = get_ipython()
1249 _, matches = ip.complete("B", "from io import B", 16)
1291 _, matches = ip.complete("B", "from io import B", 16)
1250 self.assertIn("BytesIO", matches)
1292 self.assertIn("BytesIO", matches)
1251 self.assertNotIn("BaseException", matches)
1293 self.assertNotIn("BaseException", matches)
1252
1294
1253 def test_snake_case_completion(self):
1295 def test_snake_case_completion(self):
1254 ip = get_ipython()
1296 ip = get_ipython()
1255 ip.Completer.use_jedi = False
1297 ip.Completer.use_jedi = False
1256 ip.user_ns["some_three"] = 3
1298 ip.user_ns["some_three"] = 3
1257 ip.user_ns["some_four"] = 4
1299 ip.user_ns["some_four"] = 4
1258 _, matches = ip.complete("s_", "print(s_f")
1300 _, matches = ip.complete("s_", "print(s_f")
1259 self.assertIn("some_three", matches)
1301 self.assertIn("some_three", matches)
1260 self.assertIn("some_four", matches)
1302 self.assertIn("some_four", matches)
1261
1303
1262 def test_mix_terms(self):
1304 def test_mix_terms(self):
1263 ip = get_ipython()
1305 ip = get_ipython()
1264 from textwrap import dedent
1306 from textwrap import dedent
1265
1307
1266 ip.Completer.use_jedi = False
1308 ip.Completer.use_jedi = False
1267 ip.ex(
1309 ip.ex(
1268 dedent(
1310 dedent(
1269 """
1311 """
1270 class Test:
1312 class Test:
1271 def meth(self, meth_arg1):
1313 def meth(self, meth_arg1):
1272 print("meth")
1314 print("meth")
1273
1315
1274 def meth_1(self, meth1_arg1, meth1_arg2):
1316 def meth_1(self, meth1_arg1, meth1_arg2):
1275 print("meth1")
1317 print("meth1")
1276
1318
1277 def meth_2(self, meth2_arg1, meth2_arg2):
1319 def meth_2(self, meth2_arg1, meth2_arg2):
1278 print("meth2")
1320 print("meth2")
1279 test = Test()
1321 test = Test()
1280 """
1322 """
1281 )
1323 )
1282 )
1324 )
1283 _, matches = ip.complete(None, "test.meth(")
1325 _, matches = ip.complete(None, "test.meth(")
1284 self.assertIn("meth_arg1=", matches)
1326 self.assertIn("meth_arg1=", matches)
1285 self.assertNotIn("meth2_arg1=", matches)
1327 self.assertNotIn("meth2_arg1=", matches)
1286
1328
1287 def test_percent_symbol_restrict_to_magic_completions(self):
1329 def test_percent_symbol_restrict_to_magic_completions(self):
1288 ip = get_ipython()
1330 ip = get_ipython()
1289 completer = ip.Completer
1331 completer = ip.Completer
1290 text = "%a"
1332 text = "%a"
1291
1333
1292 with provisionalcompleter():
1334 with provisionalcompleter():
1293 completer.use_jedi = True
1335 completer.use_jedi = True
1294 completions = completer.completions(text, len(text))
1336 completions = completer.completions(text, len(text))
1295 for c in completions:
1337 for c in completions:
1296 self.assertEqual(c.text[0], "%")
1338 self.assertEqual(c.text[0], "%")
1297
1339
1298 def test_fwd_unicode_restricts(self):
1340 def test_fwd_unicode_restricts(self):
1299 ip = get_ipython()
1341 ip = get_ipython()
1300 completer = ip.Completer
1342 completer = ip.Completer
1301 text = "\\ROMAN NUMERAL FIVE"
1343 text = "\\ROMAN NUMERAL FIVE"
1302
1344
1303 with provisionalcompleter():
1345 with provisionalcompleter():
1304 completer.use_jedi = True
1346 completer.use_jedi = True
1305 completions = [
1347 completions = [
1306 completion.text for completion in completer.completions(text, len(text))
1348 completion.text for completion in completer.completions(text, len(text))
1307 ]
1349 ]
1308 self.assertEqual(completions, ["\u2164"])
1350 self.assertEqual(completions, ["\u2164"])
1309
1351
1310 def test_dict_key_restrict_to_dicts(self):
1352 def test_dict_key_restrict_to_dicts(self):
1311 """Test that dict key suppresses non-dict completion items"""
1353 """Test that dict key suppresses non-dict completion items"""
1312 ip = get_ipython()
1354 ip = get_ipython()
1313 c = ip.Completer
1355 c = ip.Completer
1314 d = {"abc": None}
1356 d = {"abc": None}
1315 ip.user_ns["d"] = d
1357 ip.user_ns["d"] = d
1316
1358
1317 text = 'd["a'
1359 text = 'd["a'
1318
1360
1319 def _():
1361 def _():
1320 with provisionalcompleter():
1362 with provisionalcompleter():
1321 c.use_jedi = True
1363 c.use_jedi = True
1322 return [
1364 return [
1323 completion.text for completion in c.completions(text, len(text))
1365 completion.text for completion in c.completions(text, len(text))
1324 ]
1366 ]
1325
1367
1326 completions = _()
1368 completions = _()
1327 self.assertEqual(completions, ["abc"])
1369 self.assertEqual(completions, ["abc"])
1328
1370
1329 # check that it can be disabled in granular manner:
1371 # check that it can be disabled in granular manner:
1330 cfg = Config()
1372 cfg = Config()
1331 cfg.IPCompleter.suppress_competing_matchers = {
1373 cfg.IPCompleter.suppress_competing_matchers = {
1332 "IPCompleter.dict_key_matcher": False
1374 "IPCompleter.dict_key_matcher": False
1333 }
1375 }
1334 c.update_config(cfg)
1376 c.update_config(cfg)
1335
1377
1336 completions = _()
1378 completions = _()
1337 self.assertIn("abc", completions)
1379 self.assertIn("abc", completions)
1338 self.assertGreater(len(completions), 1)
1380 self.assertGreater(len(completions), 1)
1339
1381
1340 def test_matcher_suppression(self):
1382 def test_matcher_suppression(self):
1341 @completion_matcher(identifier="a_matcher")
1383 @completion_matcher(identifier="a_matcher")
1342 def a_matcher(text):
1384 def a_matcher(text):
1343 return ["completion_a"]
1385 return ["completion_a"]
1344
1386
1345 @completion_matcher(identifier="b_matcher", api_version=2)
1387 @completion_matcher(identifier="b_matcher", api_version=2)
1346 def b_matcher(context: CompletionContext):
1388 def b_matcher(context: CompletionContext):
1347 text = context.token
1389 text = context.token
1348 result = {"completions": [SimpleCompletion("completion_b")]}
1390 result = {"completions": [SimpleCompletion("completion_b")]}
1349
1391
1350 if text == "suppress c":
1392 if text == "suppress c":
1351 result["suppress"] = {"c_matcher"}
1393 result["suppress"] = {"c_matcher"}
1352
1394
1353 if text.startswith("suppress all"):
1395 if text.startswith("suppress all"):
1354 result["suppress"] = True
1396 result["suppress"] = True
1355 if text == "suppress all but c":
1397 if text == "suppress all but c":
1356 result["do_not_suppress"] = {"c_matcher"}
1398 result["do_not_suppress"] = {"c_matcher"}
1357 if text == "suppress all but a":
1399 if text == "suppress all but a":
1358 result["do_not_suppress"] = {"a_matcher"}
1400 result["do_not_suppress"] = {"a_matcher"}
1359
1401
1360 return result
1402 return result
1361
1403
1362 @completion_matcher(identifier="c_matcher")
1404 @completion_matcher(identifier="c_matcher")
1363 def c_matcher(text):
1405 def c_matcher(text):
1364 return ["completion_c"]
1406 return ["completion_c"]
1365
1407
1366 with custom_matchers([a_matcher, b_matcher, c_matcher]):
1408 with custom_matchers([a_matcher, b_matcher, c_matcher]):
1367 ip = get_ipython()
1409 ip = get_ipython()
1368 c = ip.Completer
1410 c = ip.Completer
1369
1411
1370 def _(text, expected):
1412 def _(text, expected):
1371 c.use_jedi = False
1413 c.use_jedi = False
1372 s, matches = c.complete(text)
1414 s, matches = c.complete(text)
1373 self.assertEqual(expected, matches)
1415 self.assertEqual(expected, matches)
1374
1416
1375 _("do not suppress", ["completion_a", "completion_b", "completion_c"])
1417 _("do not suppress", ["completion_a", "completion_b", "completion_c"])
1376 _("suppress all", ["completion_b"])
1418 _("suppress all", ["completion_b"])
1377 _("suppress all but a", ["completion_a", "completion_b"])
1419 _("suppress all but a", ["completion_a", "completion_b"])
1378 _("suppress all but c", ["completion_b", "completion_c"])
1420 _("suppress all but c", ["completion_b", "completion_c"])
1379
1421
1380 def configure(suppression_config):
1422 def configure(suppression_config):
1381 cfg = Config()
1423 cfg = Config()
1382 cfg.IPCompleter.suppress_competing_matchers = suppression_config
1424 cfg.IPCompleter.suppress_competing_matchers = suppression_config
1383 c.update_config(cfg)
1425 c.update_config(cfg)
1384
1426
1385 # test that configuration takes priority over the run-time decisions
1427 # test that configuration takes priority over the run-time decisions
1386
1428
1387 configure(False)
1429 configure(False)
1388 _("suppress all", ["completion_a", "completion_b", "completion_c"])
1430 _("suppress all", ["completion_a", "completion_b", "completion_c"])
1389
1431
1390 configure({"b_matcher": False})
1432 configure({"b_matcher": False})
1391 _("suppress all", ["completion_a", "completion_b", "completion_c"])
1433 _("suppress all", ["completion_a", "completion_b", "completion_c"])
1392
1434
1393 configure({"a_matcher": False})
1435 configure({"a_matcher": False})
1394 _("suppress all", ["completion_b"])
1436 _("suppress all", ["completion_b"])
1395
1437
1396 configure({"b_matcher": True})
1438 configure({"b_matcher": True})
1397 _("do not suppress", ["completion_b"])
1439 _("do not suppress", ["completion_b"])
1398
1440
1399 configure(True)
1441 configure(True)
1400 _("do not suppress", ["completion_a"])
1442 _("do not suppress", ["completion_a"])
1401
1443
1402 def test_matcher_suppression_with_iterator(self):
1444 def test_matcher_suppression_with_iterator(self):
1403 @completion_matcher(identifier="matcher_returning_iterator")
1445 @completion_matcher(identifier="matcher_returning_iterator")
1404 def matcher_returning_iterator(text):
1446 def matcher_returning_iterator(text):
1405 return iter(["completion_iter"])
1447 return iter(["completion_iter"])
1406
1448
1407 @completion_matcher(identifier="matcher_returning_list")
1449 @completion_matcher(identifier="matcher_returning_list")
1408 def matcher_returning_list(text):
1450 def matcher_returning_list(text):
1409 return ["completion_list"]
1451 return ["completion_list"]
1410
1452
1411 with custom_matchers([matcher_returning_iterator, matcher_returning_list]):
1453 with custom_matchers([matcher_returning_iterator, matcher_returning_list]):
1412 ip = get_ipython()
1454 ip = get_ipython()
1413 c = ip.Completer
1455 c = ip.Completer
1414
1456
1415 def _(text, expected):
1457 def _(text, expected):
1416 c.use_jedi = False
1458 c.use_jedi = False
1417 s, matches = c.complete(text)
1459 s, matches = c.complete(text)
1418 self.assertEqual(expected, matches)
1460 self.assertEqual(expected, matches)
1419
1461
1420 def configure(suppression_config):
1462 def configure(suppression_config):
1421 cfg = Config()
1463 cfg = Config()
1422 cfg.IPCompleter.suppress_competing_matchers = suppression_config
1464 cfg.IPCompleter.suppress_competing_matchers = suppression_config
1423 c.update_config(cfg)
1465 c.update_config(cfg)
1424
1466
1425 configure(False)
1467 configure(False)
1426 _("---", ["completion_iter", "completion_list"])
1468 _("---", ["completion_iter", "completion_list"])
1427
1469
1428 configure(True)
1470 configure(True)
1429 _("---", ["completion_iter"])
1471 _("---", ["completion_iter"])
1430
1472
1431 configure(None)
1473 configure(None)
1432 _("--", ["completion_iter", "completion_list"])
1474 _("--", ["completion_iter", "completion_list"])
1433
1475
1434 def test_matcher_suppression_with_jedi(self):
1476 def test_matcher_suppression_with_jedi(self):
1435 ip = get_ipython()
1477 ip = get_ipython()
1436 c = ip.Completer
1478 c = ip.Completer
1437 c.use_jedi = True
1479 c.use_jedi = True
1438
1480
1439 def configure(suppression_config):
1481 def configure(suppression_config):
1440 cfg = Config()
1482 cfg = Config()
1441 cfg.IPCompleter.suppress_competing_matchers = suppression_config
1483 cfg.IPCompleter.suppress_competing_matchers = suppression_config
1442 c.update_config(cfg)
1484 c.update_config(cfg)
1443
1485
1444 def _():
1486 def _():
1445 with provisionalcompleter():
1487 with provisionalcompleter():
1446 matches = [completion.text for completion in c.completions("dict.", 5)]
1488 matches = [completion.text for completion in c.completions("dict.", 5)]
1447 self.assertIn("keys", matches)
1489 self.assertIn("keys", matches)
1448
1490
1449 configure(False)
1491 configure(False)
1450 _()
1492 _()
1451
1493
1452 configure(True)
1494 configure(True)
1453 _()
1495 _()
1454
1496
1455 configure(None)
1497 configure(None)
1456 _()
1498 _()
1457
1499
1458 def test_matcher_disabling(self):
1500 def test_matcher_disabling(self):
1459 @completion_matcher(identifier="a_matcher")
1501 @completion_matcher(identifier="a_matcher")
1460 def a_matcher(text):
1502 def a_matcher(text):
1461 return ["completion_a"]
1503 return ["completion_a"]
1462
1504
1463 @completion_matcher(identifier="b_matcher")
1505 @completion_matcher(identifier="b_matcher")
1464 def b_matcher(text):
1506 def b_matcher(text):
1465 return ["completion_b"]
1507 return ["completion_b"]
1466
1508
1467 def _(expected):
1509 def _(expected):
1468 s, matches = c.complete("completion_")
1510 s, matches = c.complete("completion_")
1469 self.assertEqual(expected, matches)
1511 self.assertEqual(expected, matches)
1470
1512
1471 with custom_matchers([a_matcher, b_matcher]):
1513 with custom_matchers([a_matcher, b_matcher]):
1472 ip = get_ipython()
1514 ip = get_ipython()
1473 c = ip.Completer
1515 c = ip.Completer
1474
1516
1475 _(["completion_a", "completion_b"])
1517 _(["completion_a", "completion_b"])
1476
1518
1477 cfg = Config()
1519 cfg = Config()
1478 cfg.IPCompleter.disable_matchers = ["b_matcher"]
1520 cfg.IPCompleter.disable_matchers = ["b_matcher"]
1479 c.update_config(cfg)
1521 c.update_config(cfg)
1480
1522
1481 _(["completion_a"])
1523 _(["completion_a"])
1482
1524
1483 cfg.IPCompleter.disable_matchers = []
1525 cfg.IPCompleter.disable_matchers = []
1484 c.update_config(cfg)
1526 c.update_config(cfg)
1485
1527
1486 def test_matcher_priority(self):
1528 def test_matcher_priority(self):
1487 @completion_matcher(identifier="a_matcher", priority=0, api_version=2)
1529 @completion_matcher(identifier="a_matcher", priority=0, api_version=2)
1488 def a_matcher(text):
1530 def a_matcher(text):
1489 return {"completions": [SimpleCompletion("completion_a")], "suppress": True}
1531 return {"completions": [SimpleCompletion("completion_a")], "suppress": True}
1490
1532
1491 @completion_matcher(identifier="b_matcher", priority=2, api_version=2)
1533 @completion_matcher(identifier="b_matcher", priority=2, api_version=2)
1492 def b_matcher(text):
1534 def b_matcher(text):
1493 return {"completions": [SimpleCompletion("completion_b")], "suppress": True}
1535 return {"completions": [SimpleCompletion("completion_b")], "suppress": True}
1494
1536
1495 def _(expected):
1537 def _(expected):
1496 s, matches = c.complete("completion_")
1538 s, matches = c.complete("completion_")
1497 self.assertEqual(expected, matches)
1539 self.assertEqual(expected, matches)
1498
1540
1499 with custom_matchers([a_matcher, b_matcher]):
1541 with custom_matchers([a_matcher, b_matcher]):
1500 ip = get_ipython()
1542 ip = get_ipython()
1501 c = ip.Completer
1543 c = ip.Completer
1502
1544
1503 _(["completion_b"])
1545 _(["completion_b"])
1504 a_matcher.matcher_priority = 3
1546 a_matcher.matcher_priority = 3
1505 _(["completion_a"])
1547 _(["completion_a"])
General Comments 0
You need to be logged in to leave comments. Login now