##// END OF EJS Templates
Implement guarded evaluation, replace greedy, implement:...
krassowski -
Show More
This diff has been collapsed as it changes many lines, (541 lines changed) Show them Hide them
@@ -0,0 +1,541 b''
1 from typing import Callable, Protocol, Set, Tuple, NamedTuple, Literal, Union
2 import collections
3 import sys
4 import ast
5 import types
6 from functools import cached_property
7 from dataclasses import dataclass, field
8
9
class HasGetItem(Protocol):
    # Structural type: any object whose class defines `__getitem__`.
    def __getitem__(self, key) -> None: ...


class InstancesHaveGetItem(Protocol):
    # Structural type: a callable whose return value supports `__getitem__`.
    def __call__(self) -> HasGetItem: ...


class HasGetAttr(Protocol):
    # Structural type: any object whose class defines a custom `__getattr__`.
    def __getattr__(self, key) -> None: ...


class DoesNotHaveGetAttr(Protocol):
    # Structural type: an object without a custom `__getattr__` hook.
    pass

# By default `__getattr__` is not explicitly implemented on most objects
MayHaveGetattr = Union[HasGetAttr, DoesNotHaveGetAttr]
27
28
def unbind_method(func: Callable) -> Union[Callable, None]:
    """Get unbound method for given bound method.

    Returns None if cannot get unbound method: when `func` is not a
    bound method, has no name, is shadowed by an instance attribute
    of the same name, or its class does not expose it.
    """
    owner = getattr(func, '__self__', None)
    owner_class = type(owner)
    name = getattr(func, '__name__', None)
    instance_dict_overrides = getattr(owner, '__dict__', None)
    if owner is None or not name:
        return None
    # An instance-level attribute of the same name shadows the class
    # method, so the class-level version cannot be trusted to match.
    if instance_dict_overrides and name in instance_dict_overrides:
        return None
    # Default of None avoids AttributeError on exotic objects whose
    # class does not actually expose the method under `name`.
    return getattr(owner_class, name, None)
52
53
@dataclass
class EvaluationPolicy:
    """Defines which evaluation operations are allowed in a given mode.

    The base policy is a set of simple boolean switches plus an explicit
    allow-list of callables; see `SelectivePolicy` for per-type rules.
    """
    allow_locals_access: bool = False
    allow_globals_access: bool = False
    allow_item_access: bool = False
    allow_attr_access: bool = False
    allow_builtins_access: bool = False
    allow_any_calls: bool = False
    allowed_calls: Set[Callable] = field(default_factory=set)

    def can_get_item(self, value, item):
        """Return True if subscripting `value` with `item` is allowed."""
        return self.allow_item_access

    def can_get_attr(self, value, attr):
        """Return True if accessing attribute `attr` of `value` is allowed."""
        return self.allow_attr_access

    def can_call(self, func):
        """Return True if calling `func` (without keywords) is allowed."""
        if self.allow_any_calls:
            return True

        if func in self.allowed_calls:
            return True

        # A bound method compares unequal to the class-level function
        # stored in the allow-list; check the unbound version as well.
        owner_method = unbind_method(func)
        # fix: previously fell off the end returning None; be explicit
        return bool(owner_method and owner_method in self.allowed_calls)
80
def has_original_dunder_external(value, module_name, access_path, method_name):
    """Check if `value` has the original `method_name` of an external type.

    The external type is resolved by following `access_path` attributes
    starting from the module `module_name`. Returns False (rather than
    raising) when the module is not imported or the lookup fails.
    """
    if module_name not in sys.modules:
        # the allow-listed type's module is not even imported,
        # so `value` cannot be an instance of it
        return False
    try:
        member_type = sys.modules[module_name]
        for attr in access_path:
            member_type = getattr(member_type, attr)
    except (AttributeError, KeyError):
        return False
    value_type = type(value)
    if value_type is member_type:
        # exact type match: the method cannot have been overridden
        return True
    if isinstance(value, member_type):
        # subclass: only accept if the dunder was inherited unchanged
        method = getattr(value_type, method_name, None)
        member_method = getattr(member_type, method_name, None)
        if member_method == method:
            return True
    # fix: previously fell off the end returning None; be explicit
    return False
98
99
def has_original_dunder(
    value,
    allowed_types,
    allowed_methods,
    allowed_external,
    method_name
):
    """Tri-state check whether `value` carries an unmodified dunder.

    Returns True when the dunder is trusted, False when it was
    overridden, and None when the class has no such dunder at all
    (callers treat the lack of `__getattr__` as acceptable).
    """
    # note: Python ignores `__getattr__`/`__getitem__` on instances,
    # we only need to check at class level
    value_type = type(value)

    # strict type check passes → no need to inspect the method
    if value_type in allowed_types:
        return True

    method = getattr(value_type, method_name, None)

    if not method:
        # tri-state: absence of the dunder is reported as None, not False
        return None

    if method in allowed_methods:
        return True

    # fall back to allow-listed types from optional external packages
    return any(
        has_original_dunder_external(value, module_name, access_path, method_name)
        for module_name, *access_path in allowed_external
    )
128
129
@dataclass
class SelectivePolicy(EvaluationPolicy):
    """Policy allowing item/attribute access only for allow-listed types.

    Types are allow-listed either directly (as class objects) or as
    import-path tuples for types from optional external packages.
    """
    allowed_getitem: Set[HasGetItem] = field(default_factory=set)
    allowed_getitem_external: Set[Tuple[str, ...]] = field(default_factory=set)
    allowed_getattr: Set[MayHaveGetattr] = field(default_factory=set)
    allowed_getattr_external: Set[Tuple[str, ...]] = field(default_factory=set)

    def can_get_attr(self, value, attr):
        """Allow attribute access only if both attribute-lookup dunders are original."""
        has_original_attribute = has_original_dunder(
            value,
            allowed_types=self.allowed_getattr,
            allowed_methods=self._getattribute_methods,
            allowed_external=self.allowed_getattr_external,
            method_name='__getattribute__'
        )
        has_original_attr = has_original_dunder(
            value,
            allowed_types=self.allowed_getattr,
            allowed_methods=self._getattr_methods,
            allowed_external=self.allowed_getattr_external,
            method_name='__getattr__'
        )
        # Many objects do not have `__getattr__` at all (tri-state None
        # from `has_original_dunder`), this is fine.
        if has_original_attr is None and has_original_attribute:
            return True

        # Accept objects without modifications to `__getattr__` and `__getattribute__`
        return has_original_attr and has_original_attribute

    def get_attr(self, value, attr):
        """Return `getattr(value, attr)` if allowed; otherwise None."""
        if self.can_get_attr(value, attr):
            return getattr(value, attr)


    def can_get_item(self, value, item):
        """Allow accessing `__getitem__` of allow-listed instances unless it was modified."""
        return has_original_dunder(
            value,
            allowed_types=self.allowed_getitem,
            allowed_methods=self._getitem_methods,
            allowed_external=self.allowed_getitem_external,
            method_name='__getitem__'
        )

    @cached_property
    def _getitem_methods(self) -> Set[Callable]:
        # cached: computed once per policy instance from the allow-list
        return self._safe_get_methods(
            self.allowed_getitem,
            '__getitem__'
        )

    @cached_property
    def _getattr_methods(self) -> Set[Callable]:
        return self._safe_get_methods(
            self.allowed_getattr,
            '__getattr__'
        )

    @cached_property
    def _getattribute_methods(self) -> Set[Callable]:
        return self._safe_get_methods(
            self.allowed_getattr,
            '__getattribute__'
        )

    def _safe_get_methods(self, classes, name) -> Set[Callable]:
        """Collect `name` methods from `classes`, skipping classes lacking them."""
        return {
            method
            for class_ in classes
            for method in [getattr(class_, name, None)]
            if method
        }
202
203
class DummyNamedTuple(NamedTuple):
    """Used to allow-list methods shared by all `NamedTuple` subclasses."""
    pass


class EvaluationContext(NamedTuple):
    # namespaces in which names are resolved during evaluation
    locals_: dict
    globals_: dict
    # NOTE(review): 'limitted' misspelling appears to be part of the public
    # API (used by callers/tests) — confirm before ever renaming
    evaluation: Literal['forbidden', 'minimal', 'limitted', 'unsafe', 'dangerous'] = 'forbidden'
    # True when evaluating a fragment cut out of a subscript, where bare
    # slice syntax (`:`) is legal
    in_subscript: bool = False


class IdentitySubscript:
    """Returns the key itself when subscripted; used to parse subscript fragments."""
    def __getitem__(self, key):
        return key

IDENTITY_SUBSCRIPT = IdentitySubscript()
SUBSCRIPT_MARKER = '__SUBSCRIPT_SENTINEL__'

class GuardRejection(ValueError):
    """Raised when the active policy rejects an operation."""
    pass
224
225
def guarded_eval(
    code: str,
    context: EvaluationContext
):
    """Evaluate `code` according to the policy selected by `context`.

    Raises `GuardRejection` when the policy forbids an operation; in
    'dangerous' mode falls back to plain `eval`.
    """
    locals_ = context.locals_

    if context.evaluation == 'forbidden':
        raise GuardRejection('Forbidden mode')

    # note: not using `ast.literal_eval` as it does not implement
    # getitem at all, for example it fails on simple `[0][1]`

    if context.in_subscript:
        # syntactic sugar for ellipsis (:) is only available in subscripts
        # so we need to trick the ast parser into thinking that we have
        # a subscript, but we need to be able to later recognise that we did
        # it so we can ignore the actual __getitem__ operation
        if not code:
            return tuple()
        locals_ = locals_.copy()
        locals_[SUBSCRIPT_MARKER] = IDENTITY_SUBSCRIPT
        code = SUBSCRIPT_MARKER + '[' + code + ']'
        # NamedTuple provides `_replace`; simpler and cheaper than
        # rebuilding the context via `_asdict` + dict merging
        context = context._replace(locals_=locals_)

    if context.evaluation == 'dangerous':
        return eval(code, context.globals_, context.locals_)

    expression = ast.parse(code, mode='eval')

    return eval_node(expression, context)
259
def eval_node(node: Union[ast.AST, None], context: EvaluationContext):
    """
    Evaluate AST node in provided context.

    Applies evaluation restrictions defined in the context.

    Currently does not support evaluation of functions with arguments.

    Does not evaluate actions which always have side effects:
    - class definitions (`class sth: ...`)
    - function definitions (`def sth: ...`)
    - variable assignments (`x = 1`)
    - augmented assignments (`x += 1`)
    - deletions (`del x`)

    Does not evaluate operations which do not return values:
    - assertions (`assert x`)
    - pass (`pass`)
    - imports (`import x`)
    - control flow
      - conditionals (`if x:`) except for ternary IfExp (`a if x else b`)
      - loops (`for` and `while`)
      - exception handling
    """
    policy = EVALUATION_POLICIES[context.evaluation]
    if node is None:
        return None
    if isinstance(node, ast.Expression):
        return eval_node(node.body, context)
    if isinstance(node, ast.BinOp):
        # TODO: add guards (custom dunders on operands dispatch here)
        left = eval_node(node.left, context)
        right = eval_node(node.right, context)
        if isinstance(node.op, ast.Add):
            return left + right
        if isinstance(node.op, ast.Sub):
            return left - right
        if isinstance(node.op, ast.Mult):
            return left * right
        if isinstance(node.op, ast.Div):
            return left / right
        if isinstance(node.op, ast.FloorDiv):
            return left // right
        if isinstance(node.op, ast.Mod):
            return left % right
        if isinstance(node.op, ast.Pow):
            return left ** right
        if isinstance(node.op, ast.LShift):
            return left << right
        if isinstance(node.op, ast.RShift):
            return left >> right
        if isinstance(node.op, ast.BitOr):
            return left | right
        if isinstance(node.op, ast.BitXor):
            return left ^ right
        if isinstance(node.op, ast.BitAnd):
            return left & right
        if isinstance(node.op, ast.MatMult):
            return left @ right
    if isinstance(node, ast.Constant):
        return node.value
    if isinstance(node, ast.Index):
        # `ast.Index` is only produced on Python < 3.9; kept for compat
        return eval_node(node.value, context)
    if isinstance(node, ast.Tuple):
        return tuple(
            eval_node(e, context)
            for e in node.elts
        )
    if isinstance(node, ast.List):
        return [
            eval_node(e, context)
            for e in node.elts
        ]
    if isinstance(node, ast.Set):
        return {
            eval_node(e, context)
            for e in node.elts
        }
    if isinstance(node, ast.Dict):
        return dict(zip(
            [eval_node(k, context) for k in node.keys],
            [eval_node(v, context) for v in node.values]
        ))
    if isinstance(node, ast.Slice):
        return slice(
            eval_node(node.lower, context),
            eval_node(node.upper, context),
            eval_node(node.step, context)
        )
    if isinstance(node, ast.ExtSlice):
        # `ast.ExtSlice` is only produced on Python < 3.9; kept for compat
        return tuple([
            eval_node(dim, context)
            for dim in node.dims
        ])
    if isinstance(node, ast.UnaryOp):
        # TODO: add guards (custom dunders on the operand dispatch here)
        value = eval_node(node.operand, context)
        if isinstance(node.op, ast.USub):
            return -value
        if isinstance(node.op, ast.UAdd):
            return +value
        if isinstance(node.op, ast.Invert):
            return ~value
        if isinstance(node.op, ast.Not):
            return not value
        raise ValueError('Unhandled unary operation:', node.op)
    if isinstance(node, ast.Subscript):
        value = eval_node(node.value, context)
        slice_ = eval_node(node.slice, context)
        if policy.can_get_item(value, slice_):
            return value[slice_]
        raise GuardRejection(
            'Subscript access (`__getitem__`) for',
            type(value),  # not joined to avoid calling `repr`
            f' not allowed in {context.evaluation} mode'
        )
    if isinstance(node, ast.Name):
        if policy.allow_locals_access and node.id in context.locals_:
            return context.locals_[node.id]
        if policy.allow_globals_access and node.id in context.globals_:
            return context.globals_[node.id]
        if policy.allow_builtins_access:
            # fix: `__builtins__` is a dict in `__main__` but a module
            # object in imported modules; normalise before the lookup
            builtins_dict = (
                __builtins__
                if isinstance(__builtins__, dict)
                else __builtins__.__dict__
            )
            if node.id in builtins_dict:
                return builtins_dict[node.id]
        if not policy.allow_globals_access and not policy.allow_locals_access:
            raise GuardRejection(
                f'Namespace access not allowed in {context.evaluation} mode'
            )
        else:
            raise NameError(f'{node.id} not found in locals nor globals')
    if isinstance(node, ast.Attribute):
        value = eval_node(node.value, context)
        if policy.can_get_attr(value, node.attr):
            return getattr(value, node.attr)
        raise GuardRejection(
            'Attribute access (`__getattr__`) for',
            type(value),  # not joined to avoid calling `repr`
            f'not allowed in {context.evaluation} mode'
        )
    if isinstance(node, ast.IfExp):
        test = eval_node(node.test, context)
        if test:
            return eval_node(node.body, context)
        else:
            return eval_node(node.orelse, context)
    if isinstance(node, ast.Call):
        func = eval_node(node.func, context)
        # fix: removed leftover debug `print(node.keywords)`
        # keyword arguments are not supported yet, hence `not node.keywords`
        if policy.can_call(func) and not node.keywords:
            args = [
                eval_node(arg, context)
                for arg in node.args
            ]
            return func(*args)
        raise GuardRejection(
            'Call for',
            func,  # not joined to avoid calling `repr`
            f'not allowed in {context.evaluation} mode'
        )
    raise ValueError('Unhandled node', node)
419
420
# Types from optional third-party packages whose `__getitem__` is trusted.
# Referenced by import path so the packages need not be installed/imported.
SUPPORTED_EXTERNAL_GETITEM = {
    ('pandas', 'core', 'indexing', '_iLocIndexer'),
    ('pandas', 'core', 'indexing', '_LocIndexer'),
    ('pandas', 'DataFrame'),
    ('pandas', 'Series'),
    ('numpy', 'ndarray'),
    ('numpy', 'void')
}

# Built-in and stdlib container types with a side-effect-free `__getitem__`.
BUILTIN_GETITEM = {
    dict,
    str,
    bytes,
    list,
    tuple,
    collections.defaultdict,
    collections.deque,
    collections.OrderedDict,
    collections.ChainMap,
    collections.UserDict,
    collections.UserList,
    collections.UserString,
    DummyNamedTuple,
    IdentitySubscript
}
446
447
448 def _list_methods(cls, source=None):
449 """For use on immutable objects or with methods returning a copy"""
450 return [
451 getattr(cls, k)
452 for k in (source if source else dir(cls))
453 ]
454
455
# Method names which do not mutate the receiver and are safe to allow-list.
dict_non_mutating_methods = ('copy', 'keys', 'values', 'items')
list_non_mutating_methods = ('copy', 'index', 'count')
# methods shared by `set` and `frozenset` cannot mutate (frozenset is immutable)
set_non_mutating_methods = set(dir(set)) & set(dir(frozenset))


# helper types which are not directly nameable in Python source
dict_keys = type({}.keys())
method_descriptor = type(list.copy)

# Callables allowed in 'limitted' mode: safe constructors plus
# non-mutating methods of built-in containers.
ALLOWED_CALLS = {
    bytes,
    *_list_methods(bytes),
    dict,
    *_list_methods(dict, dict_non_mutating_methods),
    dict_keys.isdisjoint,
    list,
    *_list_methods(list, list_non_mutating_methods),
    set,
    *_list_methods(set, set_non_mutating_methods),
    frozenset,
    *_list_methods(frozenset),
    range,
    str,
    *_list_methods(str),
    tuple,
    *_list_methods(tuple),
    collections.deque,
    *_list_methods(collections.deque, list_non_mutating_methods),
    collections.defaultdict,
    *_list_methods(collections.defaultdict, dict_non_mutating_methods),
    collections.OrderedDict,
    *_list_methods(collections.OrderedDict, dict_non_mutating_methods),
    collections.UserDict,
    *_list_methods(collections.UserDict, dict_non_mutating_methods),
    collections.UserList,
    *_list_methods(collections.UserList, list_non_mutating_methods),
    collections.UserString,
    *_list_methods(collections.UserString, dir(str)),
    collections.Counter,
    *_list_methods(collections.Counter, dict_non_mutating_methods),
    collections.Counter.elements,
    collections.Counter.most_common
}
498
# Maps the `EvaluationContext.evaluation` literal to a policy instance.
# 'forbidden' and 'dangerous' are handled directly in `guarded_eval`
# and therefore have no entry here.
EVALUATION_POLICIES = {
    # literals only: no namespace, item, attribute access, nor calls
    'minimal': EvaluationPolicy(
        allow_builtins_access=True,
        allow_locals_access=False,
        allow_globals_access=False,
        allow_item_access=False,
        allow_attr_access=False,
        allowed_calls=set(),
        allow_any_calls=False
    ),
    # allow-list based policy for safe interactive completion
    'limitted': SelectivePolicy(
        # TODO:
        # - should reject binary and unary operations if custom methods would be dispatched
        allowed_getitem=BUILTIN_GETITEM,
        allowed_getitem_external=SUPPORTED_EXTERNAL_GETITEM,
        allowed_getattr={
            *BUILTIN_GETITEM,
            set,
            frozenset,
            object,
            type,  # `type` handles a lot of generic cases, e.g. numbers as in `int.real`.
            dict_keys,
            method_descriptor
        },
        allowed_getattr_external={
            # pandas Series/Frame implements custom `__getattr__`
            ('pandas', 'DataFrame'),
            ('pandas', 'Series')
        },
        allow_builtins_access=True,
        allow_locals_access=True,
        allow_globals_access=True,
        allowed_calls=ALLOWED_CALLS
    ),
    # everything allowed except plain `eval` (no side-effect statements)
    'unsafe': EvaluationPolicy(
        allow_builtins_access=True,
        allow_locals_access=True,
        allow_globals_access=True,
        allow_attr_access=True,
        allow_item_access=True,
        allow_any_calls=True
    )
}
@@ -0,0 +1,286 b''
1 from typing import NamedTuple
2 from IPython.core.guarded_eval import EvaluationContext, GuardRejection, guarded_eval, unbind_method
3 from IPython.testing import decorators as dec
4 import pytest
5
6
def limitted(**kwargs):
    """Build a 'limitted'-mode context whose locals are the given kwargs."""
    return EvaluationContext(locals_=kwargs, globals_={}, evaluation='limitted')
13
14
def unsafe(**kwargs):
    """Build an 'unsafe'-mode context whose locals are the given kwargs."""
    return EvaluationContext(locals_=kwargs, globals_={}, evaluation='unsafe')
21
@dec.skip_without('pandas')
def test_pandas_series_iloc():
    """`.iloc` indexer of a Series is allow-listed in limitted mode."""
    import pandas as pd
    series = pd.Series([1], index=['a'])
    assert guarded_eval('data.iloc[0]', limitted(data=series)) == 1
28
29
@dec.skip_without('pandas')
def test_pandas_series():
    """Plain Series item access works; a missing label raises KeyError."""
    import pandas as pd
    context = limitted(data=pd.Series([1], index=['a']))
    assert guarded_eval('data["a"]', context) == 1
    # missing label propagates the underlying KeyError, not a rejection
    with pytest.raises(KeyError):
        guarded_eval('data["c"]', context)
37
38
@dec.skip_without('pandas')
def test_pandas_bad_series():
    """Series subclasses overriding dunders must be rejected in limitted mode."""
    import pandas as pd

    class BadItemSeries(pd.Series):
        def __getitem__(self, key):
            return 'CUSTOM_ITEM'

    class BadAttrSeries(pd.Series):
        def __getattr__(self, key):
            return 'CUSTOM_ATTR'

    bad_series = BadItemSeries([1], index=['a'])
    context = limitted(data=bad_series)

    # overridden `__getitem__` is rejected regardless of the key
    with pytest.raises(GuardRejection):
        guarded_eval('data["a"]', context)
    with pytest.raises(GuardRejection):
        guarded_eval('data["c"]', context)

    # note: here result is a bit unexpected because
    # pandas `__getattr__` calls `__getitem__`;
    # FIXME - special case to handle it?
    assert guarded_eval('data.a', context) == 'CUSTOM_ITEM'

    # in unsafe mode the overridden dunder is allowed through
    context = unsafe(data=bad_series)
    assert guarded_eval('data["a"]', context) == 'CUSTOM_ITEM'

    bad_attr_series = BadAttrSeries([1], index=['a'])
    context = limitted(data=bad_attr_series)
    # item access still uses the unmodified pandas implementation
    assert guarded_eval('data["a"]', context) == 1
    # but the overridden `__getattr__` is rejected
    with pytest.raises(GuardRejection):
        guarded_eval('data.a', context)
71
72
@dec.skip_without('pandas')
def test_pandas_dataframe_loc():
    """`.loc` indexer of a DataFrame is allow-listed in limitted mode."""
    import pandas as pd
    from pandas.testing import assert_series_equal
    frame = pd.DataFrame([{'a': 1}])
    result = guarded_eval('data.loc[:, "a"]', limitted(data=frame))
    assert_series_equal(result, frame['a'])
83
84
def test_named_tuple():
    """NamedTuple subscripting is allowed unless `__getitem__` is overridden."""

    class GoodNamedTuple(NamedTuple):
        a: str

    class BadNamedTuple(NamedTuple):
        a: str

        def __getitem__(self, key):
            return None

    good = GoodNamedTuple(a='x')
    bad = BadNamedTuple(a='x')

    assert guarded_eval('data[0]', limitted(data=good)) == 'x'

    # a custom `__getitem__` must be rejected in limitted mode
    with pytest.raises(GuardRejection):
        guarded_eval('data[0]', limitted(data=bad))
105
106
def test_dict():
    """Dict item access (including tuple keys) and non-mutating methods work."""
    context = limitted(data={'a': 1, 'b': {'x': 2}, ('x', 'y'): 3})
    cases = [
        ('data["a"]', 1),
        ('data["b"]', {'x': 2}),
        ('data["b"]["x"]', 2),
        ('data["x", "y"]', 3),
    ]
    for code, expected in cases:
        assert guarded_eval(code, context) == expected

    # retrieving (not calling) a non-mutating method is allowed
    assert guarded_eval('data.keys', context)
117
118
def test_set():
    """Non-mutating set methods can be retrieved in limitted mode."""
    assert guarded_eval('data.difference', limitted(data={'a', 'b'}))
122
123
def test_list():
    """List item access and non-mutating method retrieval work."""
    context = limitted(data=[1, 2, 3])
    assert guarded_eval('data[1]', context) == 2
    assert guarded_eval('data.copy', context)
128
129
def test_dict_literal():
    """Dict literals evaluate without any namespace access."""
    empty_context = limitted()
    assert guarded_eval('{}', empty_context) == {}
    assert guarded_eval('{"a": 1}', empty_context) == {"a": 1}
134
135
def test_list_literal():
    """List literals evaluate without any namespace access."""
    empty_context = limitted()
    assert guarded_eval('[]', empty_context) == []
    assert guarded_eval('[1, "a"]', empty_context) == [1, "a"]
140
141
def test_set_literal():
    """Set literals and the `set()` constructor evaluate in limitted mode."""
    empty_context = limitted()
    assert guarded_eval('set()', empty_context) == set()
    assert guarded_eval('{"a"}', empty_context) == {"a"}
146
147
def test_if_expression():
    """Ternary `a if cond else b` evaluates both branches correctly."""
    empty_context = limitted()
    assert guarded_eval('2 if True else 3', empty_context) == 2
    assert guarded_eval('4 if False else 5', empty_context) == 5
152
153
def test_object():
    """Attribute access on plain `object` instances is allowed."""
    instance = object()
    assert guarded_eval('obj.__dir__', limitted(obj=instance)) == instance.__dir__
158
159
@pytest.mark.parametrize(
    "code,expected",
    [
        ('int.numerator', int.numerator),
        ('float.is_integer', float.is_integer),
        ('complex.real', complex.real),
    ],
)
def test_number_attributes(code, expected):
    """Attributes of numeric types resolve via the allow-listed `type`."""
    assert guarded_eval(code, limitted()) == expected
179
180
def test_method_descriptor():
    """Attributes of method descriptors (e.g. `list.copy`) are accessible."""
    assert guarded_eval('list.copy.__name__', limitted()) == 'copy'
184
185
@pytest.mark.parametrize(
    "data,good,bad,expected",
    [
        ([1, 2, 3], 'data.index(2)', 'data.append(4)', 1),
        ({'a': 1}, 'data.keys().isdisjoint({})', 'data.update()', True),
    ],
)
def test_calls(data, good, bad, expected):
    """Non-mutating calls are allowed; mutating calls are rejected."""
    context = limitted(data=data)
    assert guarded_eval(good, context) == expected

    with pytest.raises(GuardRejection):
        guarded_eval(bad, context)
209
210
@pytest.mark.parametrize(
    "code,expected",
    [
        ('(1\n+\n1)', 2),
        ('list(range(10))[-1:]', [9]),
        ('list(range(20))[3:-2:3]', [3, 6, 9, 12, 15]),
    ],
)
def test_literals(code, expected):
    """Multi-line expressions and slicing of call results evaluate correctly."""
    assert guarded_eval(code, limitted()) == expected
231
232
def test_subscript():
    """Subscript fragments (bare `:` syntax included) evaluate to slices."""
    context = EvaluationContext(
        locals_={},
        globals_={},
        evaluation='limitted',
        in_subscript=True,
    )
    empty_slice = slice(None, None, None)
    for code, expected in [
        ('', tuple()),
        (':', empty_slice),
        ('1:2:3', slice(1, 2, 3)),
        (':, "a"', (empty_slice, 'a')),
    ]:
        assert guarded_eval(code, context) == expected
245
246
def test_unbind_method():
    """`unbind_method` recovers the class-level function of a bound method."""
    class Overriding(list):
        def index(self, k):
            return 'CUSTOM'

    instance = Overriding()
    assert unbind_method(instance.index) is Overriding.index
    assert unbind_method([].index) is list.index
254
255
def test_assumption_instance_attr_do_not_matter():
    """This is semi-specified in Python documentation.

    However, since the specification says 'not guaranteed
    to work' rather than 'is forbidden to work', future
    versions could invalidate this assumption. This test
    is meant to catch such a change if it ever comes true.
    """
    class T:
        def __getitem__(self, k):
            return 'a'

        def __getattr__(self, k):
            return 'a'

    t = T()
    t.__getitem__ = lambda f: 'b'
    t.__getattr__ = lambda f: 'b'
    # instance-level dunder assignments are ignored; the class-level
    # implementations still win for item AND attribute access
    assert t[1] == 'a'
    # fix: this assertion previously duplicated the `__getitem__` check
    # and never exercised the `__getattr__` half of the assumption
    assert t.attr == 'a'
274
275
def test_assumption_named_tuples_share_getitem():
    """Check assumption on named tuples sharing __getitem__"""
    from typing import NamedTuple

    class First(NamedTuple):
        pass

    class Second(NamedTuple):
        pass

    assert First.__getitem__ == Second.__getitem__
@@ -1,2977 +1,3036 b''
1 1 """Completion for IPython.
2 2
3 3 This module started as fork of the rlcompleter module in the Python standard
4 4 library. The original enhancements made to rlcompleter have been sent
5 5 upstream and were accepted as of Python 2.3,
6 6
7 7 This module now support a wide variety of completion mechanism both available
8 8 for normal classic Python code, as well as completer for IPython specific
9 9 Syntax like magics.
10 10
11 11 Latex and Unicode completion
12 12 ============================
13 13
14 14 IPython and compatible frontends not only can complete your code, but can help
15 15 you to input a wide range of characters. In particular we allow you to insert
16 16 a unicode character using the tab completion mechanism.
17 17
18 18 Forward latex/unicode completion
19 19 --------------------------------
20 20
21 21 Forward completion allows you to easily type a unicode character using its latex
22 22 name, or unicode long description. To do so type a backslash followed by the
23 23 relevant name and press tab:
24 24
25 25
26 26 Using latex completion:
27 27
28 28 .. code::
29 29
30 30 \\alpha<tab>
31 31 Ξ±
32 32
33 33 or using unicode completion:
34 34
35 35
36 36 .. code::
37 37
38 38 \\GREEK SMALL LETTER ALPHA<tab>
39 39 Ξ±
40 40
41 41
42 42 Only valid Python identifiers will complete. Combining characters (like arrow or
43 43 dots) are also available, unlike latex they need to be put after their
44 44 counterpart that is to say, ``F\\\\vec<tab>`` is correct, not ``\\\\vec<tab>F``.
45 45
46 46 Some browsers are known to display combining characters incorrectly.
47 47
48 48 Backward latex completion
49 49 -------------------------
50 50
51 51 It is sometimes challenging to know how to type a character, if you are using
52 52 IPython, or any compatible frontend you can prepend backslash to the character
53 53 and press ``<tab>`` to expand it to its latex form.
54 54
55 55 .. code::
56 56
57 57 \\Ξ±<tab>
58 58 \\alpha
59 59
60 60
61 61 Both forward and backward completions can be deactivated by setting the
62 62 ``Completer.backslash_combining_completions`` option to ``False``.
63 63
64 64
65 65 Experimental
66 66 ============
67 67
68 68 Starting with IPython 6.0, this module can make use of the Jedi library to
69 69 generate completions both using static analysis of the code, and dynamically
70 70 inspecting multiple namespaces. Jedi is an autocompletion and static analysis
71 71 for Python. The APIs attached to this new mechanism is unstable and will
72 72 raise unless used in an :any:`provisionalcompleter` context manager.
73 73
74 74 You will find that the following are experimental:
75 75
76 76 - :any:`provisionalcompleter`
77 77 - :any:`IPCompleter.completions`
78 78 - :any:`Completion`
79 79 - :any:`rectify_completions`
80 80
81 81 .. note::
82 82
83 83 better name for :any:`rectify_completions` ?
84 84
85 85 We welcome any feedback on these new API, and we also encourage you to try this
86 86 module in debug mode (start IPython with ``--Completer.debug=True``) in order
87 87 to have extra logging information if :any:`jedi` is crashing, or if current
88 88 IPython completer pending deprecations are returning results not yet handled
89 89 by :any:`jedi`
90 90
91 91 Using Jedi for tab completion allows snippets like the following to work without
92 92 having to execute any code:
93 93
94 94 >>> myvar = ['hello', 42]
95 95 ... myvar[1].bi<tab>
96 96
97 97 Tab completion will be able to infer that ``myvar[1]`` is a real number without
98 98 executing any code unlike the previously available ``IPCompleter.greedy``
99 99 option.
100 100
101 101 Be sure to update :any:`jedi` to the latest stable version or to try the
102 102 current development version to get better completions.
103 103
104 104 Matchers
105 105 ========
106 106
107 107 All completions routines are implemented using unified *Matchers* API.
108 108 The matchers API is provisional and subject to change without notice.
109 109
110 110 The built-in matchers include:
111 111
112 112 - :any:`IPCompleter.dict_key_matcher`: dictionary key completions,
113 113 - :any:`IPCompleter.magic_matcher`: completions for magics,
114 114 - :any:`IPCompleter.unicode_name_matcher`,
115 115 :any:`IPCompleter.fwd_unicode_matcher`
116 116 and :any:`IPCompleter.latex_name_matcher`: see `Forward latex/unicode completion`_,
117 117 - :any:`back_unicode_name_matcher` and :any:`back_latex_name_matcher`: see `Backward latex completion`_,
118 118 - :any:`IPCompleter.file_matcher`: paths to files and directories,
119 119 - :any:`IPCompleter.python_func_kw_matcher` - function keywords,
120 120 - :any:`IPCompleter.python_matches` - globals and attributes (v1 API),
121 121 - ``IPCompleter.jedi_matcher`` - static analysis with Jedi,
122 122 - :any:`IPCompleter.custom_completer_matcher` - pluggable completer with a default
123 123 implementation in :any:`InteractiveShell` which uses IPython hooks system
124 124 (`complete_command`) with string dispatch (including regular expressions).
125 125 Differently to other matchers, ``custom_completer_matcher`` will not suppress
126 126 Jedi results to match behaviour in earlier IPython versions.
127 127
128 128 Custom matchers can be added by appending to ``IPCompleter.custom_matchers`` list.
129 129
130 130 Matcher API
131 131 -----------
132 132
133 133 Simplifying some details, the ``Matcher`` interface can be described as
134 134
135 135 .. code-block::
136 136
137 137 MatcherAPIv1 = Callable[[str], list[str]]
138 138 MatcherAPIv2 = Callable[[CompletionContext], SimpleMatcherResult]
139 139
140 140 Matcher = MatcherAPIv1 | MatcherAPIv2
141 141
142 142 The ``MatcherAPIv1`` reflects the matcher API as available prior to IPython 8.6.0
143 143 and remains supported as a simplest way for generating completions. This is also
144 144 currently the only API supported by the IPython hooks system `complete_command`.
145 145
146 146 To distinguish between matcher versions ``matcher_api_version`` attribute is used.
147 147 More precisely, the API allows to omit ``matcher_api_version`` for v1 Matchers,
148 148 and requires a literal ``2`` for v2 Matchers.
149 149
150 150 Once the API stabilises future versions may relax the requirement for specifying
151 151 ``matcher_api_version`` by switching to :any:`functools.singledispatch`, therefore
152 152 please do not rely on the presence of ``matcher_api_version`` for any purposes.
153 153
154 154 Suppression of competing matchers
155 155 ---------------------------------
156 156
157 157 By default results from all matchers are combined, in the order determined by
158 158 their priority. Matchers can request to suppress results from subsequent
159 159 matchers by setting ``suppress`` to ``True`` in the ``MatcherResult``.
160 160
161 161 When multiple matchers simultaneously request suppression, the results of
162 162 the matcher with higher priority will be returned.
163 163
164 164 Sometimes it is desirable to suppress most but not all other matchers;
165 165 this can be achieved by adding a list of identifiers of matchers which
166 166 should not be suppressed to ``MatcherResult`` under ``do_not_suppress`` key.
167 167
168 168 The suppression behaviour is user-configurable via
169 169 :any:`IPCompleter.suppress_competing_matchers`.
170 170 """
171 171
172 172
173 173 # Copyright (c) IPython Development Team.
174 174 # Distributed under the terms of the Modified BSD License.
175 175 #
176 176 # Some of this code originated from rlcompleter in the Python standard library
177 177 # Copyright (C) 2001 Python Software Foundation, www.python.org
178 178
179 179 from __future__ import annotations
180 180 import builtins as builtin_mod
181 181 import glob
182 182 import inspect
183 183 import itertools
184 184 import keyword
185 185 import os
186 186 import re
187 187 import string
188 188 import sys
189 189 import time
190 190 import unicodedata
191 191 import uuid
192 192 import warnings
193 from ast import literal_eval
193 194 from contextlib import contextmanager
194 195 from dataclasses import dataclass
195 196 from functools import cached_property, partial
196 197 from importlib import import_module
197 198 from types import SimpleNamespace
198 199 from typing import (
199 200 Iterable,
200 201 Iterator,
201 202 List,
202 203 Tuple,
203 204 Union,
204 205 Any,
205 206 Sequence,
206 207 Dict,
207 208 NamedTuple,
208 209 Pattern,
209 210 Optional,
210 211 TYPE_CHECKING,
211 212 Set,
212 213 Literal,
213 214 )
214 215
216 from IPython.core.guarded_eval import guarded_eval, EvaluationContext
215 217 from IPython.core.error import TryNext
216 218 from IPython.core.inputtransformer2 import ESC_MAGIC
217 219 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
218 220 from IPython.core.oinspect import InspectColors
219 221 from IPython.testing.skipdoctest import skip_doctest
220 222 from IPython.utils import generics
221 223 from IPython.utils.decorators import sphinx_options
222 224 from IPython.utils.dir2 import dir2, get_real_method
223 225 from IPython.utils.docs import GENERATING_DOCUMENTATION
224 226 from IPython.utils.path import ensure_dir_exists
225 227 from IPython.utils.process import arg_split
226 228 from traitlets import (
227 229 Bool,
228 230 Enum,
229 231 Int,
230 232 List as ListTrait,
231 233 Unicode,
232 234 Dict as DictTrait,
233 235 Union as UnionTrait,
234 236 default,
235 237 observe,
236 238 )
237 239 from traitlets.config.configurable import Configurable
238 240
239 241 import __main__
240 242
241 243 # skip module doctests
242 244 __skip_doctest__ = True
243 245
244 246
# Jedi is an optional dependency: record availability in JEDI_INSTALLED so the
# completer can fall back to the non-Jedi code paths when it is missing.
245 247 try:
246 248     import jedi
# Match IPython's own case-sensitive completion behaviour.
247 249     jedi.settings.case_insensitive_completion = False
248 250     import jedi.api.helpers
249 251     import jedi.api.classes
250 252     JEDI_INSTALLED = True
251 253 except ImportError:
252 254     JEDI_INSTALLED = False
253 255
254 256
# typing_extensions is only required when type checking or building the docs;
# at runtime cheap stand-ins are substituted so IPython keeps working without
# the extra dependency (and on Pythons older than the features require).
255 257 if TYPE_CHECKING or GENERATING_DOCUMENTATION:
256 258     from typing import cast
257 259     from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias
258 260 else:
259 261
260 262     def cast(obj, type_):
261 263         """Workaround for `TypeError: MatcherAPIv2() takes no arguments`"""
262 264         return obj
263 265
# Runtime placeholders: these names are only ever used in annotations, so any
# subscriptable/neutral object will do here.
264 266     # do not require on runtime
265 267     NotRequired = Tuple  # requires Python >=3.11
266 268     TypedDict = Dict  # by extension of `NotRequired` requires 3.11 too
267 269     Protocol = object  # requires Python >=3.8
268 270     TypeAlias = Any  # requires Python >=3.10
269 271     if GENERATING_DOCUMENTATION:
270 272         from typing import TypedDict
271 273
272 274 # -----------------------------------------------------------------------------
273 275 # Globals
274 276 #-----------------------------------------------------------------------------
275 277
276 278 # Ranges containing most of the assigned Unicode names. We could be finer
277 279 # grained, but it is unclear whether that is worth the performance cost: while
278 280 # Unicode has code points in range(0, 0x110000), only about 10% of them have
279 281 # names (131808 as of writing). The two ranges below cover all of them with a
280 282 # density of ~67%; the next biggest gap would only add about 1% density and
281 283 # there are ~600 gaps that would need hard coding.
282 284 _UNICODE_RANGES = [(32, 0x3134b), (0xe0001, 0xe01f0)]
283 285
284 286 # Public API
285 287 __all__ = ["Completer", "IPCompleter"]
286 288
# On Windows only the space character needs escaping; POSIX shells treat many
# more characters specially.
287 289 if sys.platform == 'win32':
288 290     PROTECTABLES = ' '
289 291 else:
290 292     PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
291 293
292 294 # Protect against returning an enormous number of completions which the frontend
293 295 # may have trouble processing.
294 296 MATCHES_LIMIT = 500
295 297
296 298 # Completion type reported when no type can be inferred.
297 299 _UNKNOWN_TYPE = "<unknown>"
298 300
301 # Sentinel signalling that expression evaluation produced no usable object.
302 not_found = object()
303
299 304 class ProvisionalCompleterWarning(FutureWarning):
300 305     """
301 306     Exception raised by an experimental feature in this module.
302 307
303 308     Wrap code in :any:`provisionalcompleter` context manager if you
304 309     are certain you want to use an unstable feature.
305 310     """
306 311     pass
307 312
# Escalate to an error by default so provisional APIs cannot be used silently;
# the provisionalcompleter() context manager relaxes this locally.
308 313 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
309 314
310 315
@skip_doctest
@contextmanager
def provisionalcompleter(action='ignore'):
    """
    Context manager required around any use of unstable completer behaviour or APIs.

    >>> with provisionalcompleter():
    ...     completer.do_experimental_things() # works

    >>> completer.do_experimental_things() # raises.

    .. note::

        Unstable

        By entering this context manager you acknowledge that the provisional
        completer APIs may change without notice, and you agree not to complain
        if they do.

        You also understand that, if the API is not to your liking, you should
        report a bug explaining your use case upstream.

        Feedback, feature requests and improvements on any of the unstable
        APIs are very welcome!
    """
    # Temporarily relax the module-level 'error' filter for provisional warnings.
    with warnings.catch_warnings():
        warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
        yield
339 344
340 345
def has_open_quotes(s):
    """Return the quote character left open in ``s``, or False.

    A quote type is considered open when it occurs an odd number of times.
    Double quotes are checked first, so complex cases with nested quotes
    resolve to ``'"'``.

    Returns
    -------
    The open quote character (``'"'`` or ``"'"``), or ``False`` when neither
    quote type is open.
    """
    for quote in ('"', "'"):
        if s.count(quote) % 2:
            return quote
    return False
360 365
361 366
def protect_filename(s, protectables=PROTECTABLES):
    """Escape a string to protect certain characters.

    On Windows the whole string is wrapped in double quotes; elsewhere each
    protectable character is backslash-escaped individually. Strings that
    contain no protectable characters are returned unchanged.
    """
    if set(s).isdisjoint(protectables):
        return s
    if sys.platform == "win32":
        return '"' + s + '"'
    return "".join("\\" + ch if ch in protectables else ch for ch in s)
371 376
372 377
def expand_user(path: str) -> Tuple[str, bool, str]:
    """Expand ``~``-style usernames in strings.

    This is similar to :func:`os.path.expanduser`, but it computes and returns
    extra information that will be useful if the input was being used in
    computing completions, and you wish to return the completions with the
    original '~' instead of its expanded value.

    Parameters
    ----------
    path : str
        String to be expanded. If no ~ is present, the output is the same as the
        input.

    Returns
    -------
    newpath : str
        Result of ~ expansion in the input path.
    tilde_expand : bool
        Whether any expansion was performed or not.
    tilde_val : str
        The value that ~ was replaced with.
    """
    if not path.startswith('~'):
        # No expansion needed; report the input unchanged.
        return path, False, ''

    newpath = os.path.expanduser(path)
    trailing = len(path) - 1
    # Strip the part of the result that corresponds to the characters after
    # '~'; what remains is exactly what '~' expanded to. A bare '~' expands
    # in full.
    tilde_val = newpath[:-trailing] if trailing else newpath
    return newpath, True, tilde_val
411 416
412 417
def compress_user(path: str, tilde_expand: bool, tilde_val: str) -> str:
    """Does the opposite of expand_user, with its outputs.

    When ``tilde_expand`` is true, fold ``tilde_val`` back into ``~`` inside
    ``path``; otherwise return ``path`` untouched.
    """
    return path.replace(tilde_val, '~') if tilde_expand else path
420 425
421 426
def completions_sorting_key(word):
    """key for sorting completions

    This does several things:

    - Demote any completions starting with underscores to the end
    - Insert any %magic and %%cellmagic completions in the alphabetical order
      by their name
    """
    underscore_rank = 0
    magic_rank = 0

    # Single underscore sorts late, dunder later still.
    if word.startswith('__'):
        underscore_rank = 2
    elif word.startswith('_'):
        underscore_rank = 1

    # Keyword-argument style completions (`name=`) always sort first.
    if word.endswith('='):
        underscore_rank = -1

    if word.startswith('%%'):
        # If there's another % in there, this is something else, so leave it alone
        if "%" not in word[2:]:
            word = word[2:]
            magic_rank = 2
    elif word.startswith('%'):
        if "%" not in word[1:]:
            word = word[1:]
            magic_rank = 1

    return underscore_rank, word, magic_rank
452 457
453 458
454 459 class _FakeJediCompletion:
455 460 """
456 461 This is a workaround to communicate to the UI that Jedi has crashed and to
457 462 report a bug. Will be used only id :any:`IPCompleter.debug` is set to true.
458 463
459 464 Added in IPython 6.0 so should likely be removed for 7.0
460 465
461 466 """
462 467
463 468 def __init__(self, name):
464 469
465 470 self.name = name
466 471 self.complete = name
467 472 self.type = 'crashed'
468 473 self.name_with_symbols = name
469 474 self.signature = ''
470 475 self._origin = 'fake'
471 476
472 477 def __repr__(self):
473 478 return '<Fake completion object jedi has crashed>'
474 479
475 480
476 481 _JediCompletionLike = Union[jedi.api.Completion, _FakeJediCompletion]
477 482
478 483
class Completion:
    """
    Completion object used and returned by IPython completers.

    .. warning::

        Unstable

        This function is unstable, API may change without warning.
        It will also raise unless use in proper context manager.

    This acts as a middle-ground :any:`Completion` object between the
    :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
    object. While Jedi needs a lot of information about the evaluator and how
    the code should be run/inspected, Prompt Toolkit (and other frontends)
    mostly need user-facing information:

    - Which range should be replaced by what.
    - Some metadata (like completion type), or meta information to be
      displayed to the user.

    For debugging purposes we can also store the origin of the completion
    (``jedi``, ``IPython.python_matches``, ``IPython.magics_matches``...).
    """

    __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']

    def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin='', signature='') -> None:
        # Provisional API: warn (raises by default via the module-level filter)
        # unless wrapped in the provisionalcompleter() context manager.
        warnings.warn(
            "``Completion`` is a provisional API (as of IPython 6.0). "
            "It may change without warnings. "
            "Use in corresponding context manager.",
            category=ProvisionalCompleterWarning,
            stacklevel=2,
        )

        self.start = start
        self.end = end
        self.text = text
        self.type = type
        self.signature = signature
        self._origin = _origin

    def __repr__(self):
        return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
            (self.start, self.end, self.text, self.type or '?', self.signature or '?')

    def __eq__(self, other) -> Bool:
        """
        Equality and hash do not hash the type (as some completer may not be
        able to infer the type), but are use to (partially) de-duplicate
        completion.

        Completely de-duplicating completion is a bit tricker that just
        comparing as it depends on surrounding text, which Completions are not
        aware of.
        """
        return (
            self.start == other.start
            and self.end == other.end
            and self.text == other.text
        )

    def __hash__(self):
        # Must stay consistent with __eq__: type/signature are excluded.
        return hash((self.start, self.end, self.text))
539 544
540 545
class SimpleCompletion:
    """Completion item to be included in the dictionary returned by new-style Matcher (API v2).

    .. warning::

        Provisional

        This class is used to describe the currently supported attributes of
        simple completion items, and any additional implementation details
        should not be relied on. Additional attributes may be included in
        future versions, and meaning of text disambiguated from the current
        dual meaning of "text to insert" and "text to used as a label".
    """

    __slots__ = ["text", "type"]

    def __init__(self, text: str, *, type: str = None):
        # ``text`` currently doubles as both the insertion text and the label.
        self.text = text
        self.type = type

    def __repr__(self):
        return f"<SimpleCompletion text={self.text!r} type={self.type!r}>"
563 568
564 569
class _MatcherResultBase(TypedDict):
    """Definition of dictionary to be returned by new-style Matcher (API v2)."""

    #: Suffix of the provided ``CompletionContext.token``, if not given defaults to full token.
    matched_fragment: NotRequired[str]

    #: Whether to suppress results from all other matchers (True), some
    #: matchers (set of identifiers) or none (False); default is False.
    suppress: NotRequired[Union[bool, Set[str]]]

    #: Identifiers of matchers which should NOT be suppressed when this matcher
    #: requests to suppress all other matchers; defaults to an empty set.
    do_not_suppress: NotRequired[Set[str]]

    #: Are completions already ordered and should be left as-is? default is False.
    ordered: NotRequired[bool]


@sphinx_options(show_inherited_members=True, exclude_inherited_from=["dict"])
class SimpleMatcherResult(_MatcherResultBase, TypedDict):
    """Result of new-style completion matcher."""

    # note: TypedDict is added again to the inheritance chain
    # in order to get __orig_bases__ for documentation

    #: List of candidate completions
    completions: Sequence[SimpleCompletion]


class _JediMatcherResult(_MatcherResultBase):
    """Matching result returned by Jedi (will be processed differently)."""

    #: list of candidate completions
    completions: Iterable[_JediCompletionLike]
599 604
600 605
@dataclass
class CompletionContext:
    """Completion context provided as an argument to matchers in the Matcher API v2."""

    # rationale: many legacy matchers relied on completer state (`self.text_until_cursor`)
    # which was not explicitly visible as an argument of the matcher, making any refactor
    # prone to errors; by explicitly passing `cursor_position` we can decouple the matchers
    # from the completer, and make substituting them in sub-classes easier.

    #: Relevant fragment of code directly preceding the cursor.
    #: The extraction of token is implemented via splitter heuristic
    #: (following readline behaviour for legacy reasons), which is user configurable
    #: (by switching the greedy mode).
    token: str

    #: The full available content of the editor or buffer
    full_text: str

    #: Cursor position in the line (the same for ``full_text`` and ``text``).
    cursor_position: int

    #: Cursor line in ``full_text``.
    cursor_line: int

    #: The maximum number of completions that will be used downstream.
    #: Matchers can use this information to abort early.
    #: The built-in Jedi matcher is currently excepted from this limit.
    #: If not given, return all possible completions.
    limit: Optional[int]

    @cached_property
    def text_until_cursor(self) -> str:
        # Everything on the cursor's line up to (but excluding) the cursor.
        return self.line_with_cursor[: self.cursor_position]

    @cached_property
    def line_with_cursor(self) -> str:
        return self.full_text.split("\n")[self.cursor_line]
638 643
639 644
640 645 #: Matcher results for API v2.
641 646 MatcherResult = Union[SimpleMatcherResult, _JediMatcherResult]
642 647
643 648
class _MatcherAPIv1Base(Protocol):
    """Structural type for a v1 matcher: a plain callable from text to matches."""

    def __call__(self, text: str) -> list[str]:
        """Call signature."""


class _MatcherAPIv1Total(_MatcherAPIv1Base, Protocol):
    """v1 matcher that additionally declares its API version attribute."""

    #: API version
    matcher_api_version: Optional[Literal[1]]

    def __call__(self, text: str) -> list[str]:
        """Call signature."""


#: Protocol describing Matcher API v1.
MatcherAPIv1: TypeAlias = Union[_MatcherAPIv1Base, _MatcherAPIv1Total]


class MatcherAPIv2(Protocol):
    """Protocol describing Matcher API v2."""

    #: API version
    matcher_api_version: Literal[2] = 2

    def __call__(self, context: CompletionContext) -> MatcherResult:
        """Call signature."""


#: Any supported matcher, either API version.
Matcher: TypeAlias = Union[MatcherAPIv1, MatcherAPIv2]
672 677
673 678
def has_any_completions(result: MatcherResult) -> bool:
    """Check if any result includes any completions."""
    candidates = result["completions"]
    # Sized containers can be tested directly.
    if hasattr(candidates, "__len__"):
        return len(candidates) != 0
    # Otherwise peek at the iterator; if it yields, splice the consumed
    # element back so the caller still sees the full stream.
    try:
        first = next(candidates)
    except StopIteration:
        return False
    result["completions"] = itertools.chain([first], candidates)
    return True
685 690
686 691
def completion_matcher(
    *, priority: float = None, identifier: str = None, api_version: int = 1
):
    """Adds attributes describing the matcher.

    Parameters
    ----------
    priority : Optional[float]
        The priority of the matcher, determines the order of execution of matchers.
        Higher priority means that the matcher will be executed first. Defaults to 0.
    identifier : Optional[str]
        identifier of the matcher allowing users to modify the behaviour via traitlets,
        and also used to for debugging (will be passed as ``origin`` with the completions).

        Defaults to matcher function's ``__qualname__`` (for example,
        ``IPCompleter.file_matcher`` for the built-in matched defined
        as a ``file_matcher`` method of the ``IPCompleter`` class).
    api_version: Optional[int]
        version of the Matcher API used by this matcher.
        Currently supported values are 1 and 2.
        Defaults to 1.
    """

    def decorate(func: Matcher):
        func.matcher_priority = priority or 0
        func.matcher_identifier = identifier or func.__qualname__
        func.matcher_api_version = api_version
        # Purely for static analysis: narrow the declared type of the matcher.
        if TYPE_CHECKING:
            if api_version == 1:
                func = cast(func, MatcherAPIv1)
            elif api_version == 2:
                func = cast(func, MatcherAPIv2)
        return func

    return decorate
722 727
723 728
724 729 def _get_matcher_priority(matcher: Matcher):
725 730 return getattr(matcher, "matcher_priority", 0)
726 731
727 732
728 733 def _get_matcher_id(matcher: Matcher):
729 734 return getattr(matcher, "matcher_identifier", matcher.__qualname__)
730 735
731 736
732 737 def _get_matcher_api_version(matcher):
733 738 return getattr(matcher, "matcher_api_version", 1)
734 739
735 740
# Decorator for Matcher API v2 matchers: ``completion_matcher`` with
# ``api_version`` pinned to 2.
736 741 context_matcher = partial(completion_matcher, api_version=2)
737 742
738 743
# Internal alias for an iterable of provisional ``Completion`` objects.
739 744 _IC = Iterable[Completion]
740 745
741 746
742 747 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
743 748 """
744 749 Deduplicate a set of completions.
745 750
746 751 .. warning::
747 752
748 753 Unstable
749 754
750 755 This function is unstable, API may change without warning.
751 756
752 757 Parameters
753 758 ----------
754 759 text : str
755 760 text that should be completed.
756 761 completions : Iterator[Completion]
757 762 iterator over the completions to deduplicate
758 763
759 764 Yields
760 765 ------
761 766 `Completions` objects
762 767 Completions coming from multiple sources, may be different but end up having
763 768 the same effect when applied to ``text``. If this is the case, this will
764 769 consider completions as equal and only emit the first encountered.
765 770 Not folded in `completions()` yet for debugging purpose, and to detect when
766 771 the IPython completer does return things that Jedi does not, but should be
767 772 at some point.
768 773 """
769 774 completions = list(completions)
770 775 if not completions:
771 776 return
772 777
773 778 new_start = min(c.start for c in completions)
774 779 new_end = max(c.end for c in completions)
775 780
776 781 seen = set()
777 782 for c in completions:
778 783 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
779 784 if new_text not in seen:
780 785 yield c
781 786 seen.add(new_text)
782 787
783 788
def rectify_completions(text: str, completions: _IC, *, _debug: bool = False) -> _IC:
    """
    Rectify a set of completions to all have the same ``start`` and ``end``.

    .. warning::

        Unstable

        This function is unstable, API may change without warning.
        It will also raise unless use in proper context manager.

    Parameters
    ----------
    text : str
        text that should be completed.
    completions : Iterator[Completion]
        iterator over the completions to rectify
    _debug : bool
        Log failed completion

    Notes
    -----
    :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
    the Jupyter Protocol requires them to behave like so. This will readjust
    the completion to have the same ``start`` and ``end`` by padding both
    extremities with surrounding text.

    During stabilisation should support a ``_debug`` option to log which
    completion are return by the IPython completer and not found in Jedi in
    order to make upstream bug report.
    """
    warnings.warn(
        "`rectify_completions` is a provisional API (as of IPython 6.0). "
        "It may change without warnings. "
        "Use in corresponding context manager.",
        category=ProvisionalCompleterWarning,
        stacklevel=2,
    )

    completions = list(completions)
    if not completions:
        return

    # Widest span covered by any completion becomes the common span.
    new_start = min(c.start for c in completions)
    new_end = max(c.end for c in completions)

    seen_jedi = set()
    seen_python_matches = set()
    for c in completions:
        new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
        if c._origin == 'jedi':
            seen_jedi.add(new_text)
        elif c._origin == 'IPCompleter.python_matches':
            seen_python_matches.add(new_text)
        yield Completion(
            new_start, new_end, new_text,
            type=c.type, _origin=c._origin, signature=c.signature,
        )
    # Debug aid: report completions IPython produced that Jedi did not.
    diff = seen_python_matches.difference(seen_jedi)
    if diff and _debug:
        print('IPython.python matches have extras:', diff)
841 846
842 847
# Word-splitting delimiters for CompletionSplitter; on Windows ':' and '\\'
# are excluded so drive letters and path separators survive splitting.
843 848 if sys.platform == 'win32':
844 849     DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
845 850 else:
846 851     DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
847 852
# Much smaller delimiter set used by greedy completion.
848 853 GREEDY_DELIMS = ' =\r\n'
849 854
850 855
class CompletionSplitter(object):
    """An object to split an input line in a manner similar to readline.

    By having our own implementation, we can expose readline-like completion in
    a uniform manner to all frontends. This object only needs to be given the
    line of text to be split and the cursor position on said line, and it
    returns the 'word' to be completed on at the cursor after splitting the
    entire line.

    What characters are used as splitting delimiters can be controlled by
    setting the ``delims`` attribute (this is a property that internally
    automatically builds the necessary regular expression)"""

    # Private interface

    # A string of delimiter characters. The default value makes sense for
    # IPython's most typical usage patterns.
    _delims = DELIMS

    # The expression (a normal string) to be compiled into a regular expression
    # for actual splitting. We store it as an attribute mostly for ease of
    # debugging, since this type of code can be so tricky to debug.
    _delim_expr = None

    # The regular expression that does the actual splitting
    _delim_re = None

    def __init__(self, delims=None):
        self.delims = CompletionSplitter._delims if delims is None else delims

    @property
    def delims(self):
        """Return the string of delimiter characters."""
        return self._delims

    @delims.setter
    def delims(self, delims):
        """Set the delimiters for line splitting."""
        # Escape every delimiter and build a character class out of them.
        expr = "[" + "".join("\\" + ch for ch in delims) + "]"
        self._delim_re = re.compile(expr)
        self._delims = delims
        self._delim_expr = expr

    def split_line(self, line, cursor_pos=None):
        """Split a line of text with a cursor at the given position."""
        fragment = line if cursor_pos is None else line[:cursor_pos]
        return self._delim_re.split(fragment)[-1]
900 905
901 906
902 907
class Completer(Configurable):

    greedy = Bool(
        False,
        help="""Activate greedy completion.

        .. deprecated:: 8.8
            Use :any:`evaluation` instead.

        As of IPython 8.8 proxy for ``evaluation = 'unsafe'`` when set to ``True``,
        and for ``'forbidden'`` when set to ``False``.
        """,
    ).tag(config=True)

    # NOTE(review): 'limitted' is misspelled, but these are user-facing config
    # values - renaming them would break existing user configuration, so a
    # deprecation path is needed before the spelling can be fixed; confirm.
    evaluation = Enum(
        ('forbidden', 'minimal', 'limitted', 'unsafe', 'dangerous'),
        default_value='limitted',
        help="""Code evaluation under completion.

        Successive options allow to enable more eager evaluation for more accurate completion suggestions,
        including for nested dictionaries, nested lists, or even results of function calls. Setting `unsafe`
        or higher can lead to evaluation of arbitrary user code on TAB with potentially dangerous side effects.

        Allowed values are:
        - `forbidden`: no evaluation at all
        - `minimal`: evaluation of literals and access to built-in namespaces; no item/attribute evaluation nor access to locals/globals
        - `limitted` (default): access to all namespaces, evaluation of hard-coded methods (``keys()``, ``__getattr__``, ``__getitems__``, etc) on allow-listed objects (e.g. ``dict``, ``list``, ``tuple``, ``pandas.Series``)
        - `unsafe`: evaluation of all methods and function calls but not of syntax with side-effects like `del x`,
        - `dangerous`: completely arbitrary evaluation
        """,
    ).tag(config=True)

    use_jedi = Bool(
        default_value=JEDI_INSTALLED,
        help="Experimental: Use Jedi to generate autocompletions. "
             "Default to True if jedi is installed.",
    ).tag(config=True)

    jedi_compute_type_timeout = Int(
        default_value=400,
        help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
        Set to 0 to stop computing types. Non-zero value lower than 100ms may hurt
        performance by preventing jedi to build its cache.
        """,
    ).tag(config=True)

    debug = Bool(
        default_value=False,
        help='Enable debug for the Completer. Mostly print extra '
             'information for experimental jedi integration.',
    ).tag(config=True)

    backslash_combining_completions = Bool(
        True,
        help="Enable unicode completions, e.g. \\alpha<tab> . "
             "Includes completion of latex commands, unicode names, and expanding "
             "unicode characters back to latex commands.",
    ).tag(config=True)

    def __init__(self, namespace=None, global_namespace=None, **kwargs):
        """Create a new completer for the command line.

        Completer(namespace=ns, global_namespace=ns2) -> completer instance.

        If unspecified, the default namespace where completions are performed
        is __main__ (technically, __main__.__dict__). Namespaces should be
        given as dictionaries.

        An optional second namespace can be given. This allows the completer
        to handle cases where both the local and global scopes need to be
        distinguished.
        """
        # Don't bind to a namespace quite yet, but flag whether the user wants
        # a specific namespace or to use __main__.__dict__. This will allow us
        # to bind to __main__.__dict__ at completion time, not now.
        self.use_main_ns = namespace is None
        if namespace is not None:
            self.namespace = namespace

        # The global namespace, if given, can be bound directly.
        self.global_namespace = {} if global_namespace is None else global_namespace

        self.custom_matchers = []

        super(Completer, self).__init__(**kwargs)

    def complete(self, text, state):
        """Return the next possible completion for 'text'.

        This is called successively with state == 0, 1, 2, ... until it
        returns None. The completion should begin with 'text'.
        """
        if self.use_main_ns:
            self.namespace = __main__.__dict__

        # The candidate list is only recomputed on the first call of a cycle.
        if state == 0:
            if "." in text:
                self.matches = self.attr_matches(text)
            else:
                self.matches = self.global_matches(text)
        try:
            return self.matches[state]
        except IndexError:
            return None

    def global_matches(self, text):
        """Compute matches when text is a simple name.

        Return a list of all keywords, built-in functions and names currently
        defined in self.namespace or self.global_namespace that match.
        """
        matches = []
        n = len(text)
        word_sources = [
            keyword.kwlist,
            builtin_mod.__dict__.keys(),
            list(self.namespace.keys()),
            list(self.global_namespace.keys()),
        ]
        for source in word_sources:
            for word in source:
                if word[:n] == text and word != "__builtins__":
                    matches.append(word)

        # Also match snake_case abbreviations: typing the first letter of
        # each segment (e.g. `t_c`) completes to the full name.
        snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
        for source in [list(self.namespace.keys()), list(self.global_namespace.keys())]:
            abbreviations = {
                "_".join(segment[0] for segment in word.split("_")): word
                for word in source
                if snake_case_re.match(word)
            }
            for abbrev, full_word in abbreviations.items():
                if abbrev[:n] == text and abbrev != "__builtins__":
                    matches.append(full_word)
        return matches

    def attr_matches(self, text):
        """Compute matches when text contains a dot.

        Assuming the text is of the form NAME.NAME....[NAME], and is
        evaluatable in self.namespace or self.global_namespace, it will be
        evaluated and its attributes (as revealed by dir()) are used as
        possible completions. (For class instances, class members are
        also considered.)

        WARNING: this can still invoke arbitrary C code, if an object
        with a __getattr__ hook is evaluated.
        """
        m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
        if not m2:
            return []
        expr, attr = m2.group(1, 2)

        # Evaluate the expression under the configured `evaluation` policy.
        obj = self._evaluate_expr(expr)

        if obj is not_found:
            return []

        if self.limit_to__all__ and hasattr(obj, '__all__'):
            words = get__all__entries(obj)
        else:
            words = dir2(obj)

        try:
            words = generics.complete_object(obj, words)
        except TryNext:
            pass
        except AssertionError:
            raise
        except Exception:
            # Silence errors from completion function
            #raise # dbg
            pass
        # Build match list to return
        n = len(attr)
        return ["%s.%s" % (expr, w) for w in words if w[:n] == attr]

    def _evaluate_expr(self, expr):
        """Guarded-evaluate ``expr``; return ``not_found`` when it cannot be evaluated."""
        obj = not_found
        done = False
        while not done and expr:
            try:
                obj = guarded_eval(
                    expr,
                    EvaluationContext(
                        globals_=self.global_namespace,
                        locals_=self.namespace,
                        evaluation=self.evaluation,
                    ),
                )
                done = True
            except Exception as e:
                if self.debug:
                    print('Evaluation exception', e)
                # trim the expression to remove any invalid prefix
                # e.g. user starts `(d[`, so we get `expr = '(d'`,
                # where parenthesis is not closed.
                # TODO: make this faster by reusing parts of the computation?
                expr = expr[1:]
        return obj
1073 1111
1074 1112 def get__all__entries(obj):
1075 1113 """returns the strings in the __all__ attribute"""
1076 1114 try:
1077 1115 words = getattr(obj, '__all__')
1078 1116 except:
1079 1117 return []
1080 1118
1081 1119 return [w for w in words if isinstance(w, str)]
1082 1120
1083 1121
1084 def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes]]]], prefix: str, delims: str,
1085 extra_prefix: Optional[Tuple[str, bytes]]=None) -> Tuple[str, int, List[str]]:
1122 def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes], ...]]], prefix: str, delims: str,
1123 extra_prefix: Optional[Tuple[Union[str, bytes], ...]]=None) -> Tuple[str, int, List[str]]:
1086 1124 """Used by dict_key_matches, matching the prefix to a list of keys
1087 1125
1088 1126 Parameters
1089 1127 ----------
1090 1128 keys
1091 1129 list of keys in dictionary currently being completed.
1092 1130 prefix
1093 1131 Part of the text already typed by the user. E.g. `mydict[b'fo`
1094 1132 delims
1095 1133 String of delimiters to consider when finding the current key.
1096 1134 extra_prefix : optional
1097 1135 Part of the text already typed in multi-key index cases. E.g. for
1098 1136 `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.
1099 1137
1100 1138 Returns
1101 1139 -------
1102 1140 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
1103 1141 ``quote`` being the quote that need to be used to close current string.
1104 1142 ``token_start`` the position where the replacement should start occurring,
1105 1143 ``matches`` a list of replacement/completion
1106 1144
1107 1145 """
1108 1146 prefix_tuple = extra_prefix if extra_prefix else ()
1147
1109 1148 Nprefix = len(prefix_tuple)
1149 text_serializable_types = (str, bytes, int, float, slice)
1110 1150 def filter_prefix_tuple(key):
1111 1151 # Reject too short keys
1112 1152 if len(key) <= Nprefix:
1113 1153 return False
1114 # Reject keys with non str/bytes in it
1154 # Reject keys which cannot be serialised to text
1115 1155 for k in key:
1116 if not isinstance(k, (str, bytes)):
1156 if not isinstance(k, text_serializable_types):
1117 1157 return False
1118 1158 # Reject keys that do not match the prefix
1119 1159 for k, pt in zip(key, prefix_tuple):
1120 if k != pt:
1160 if k != pt and not isinstance(pt, slice):
1121 1161 return False
1122 1162 # All checks passed!
1123 1163 return True
1124 1164
1125 filtered_keys:List[Union[str,bytes]] = []
1165 filtered_keys: List[Union[str, bytes, int, float, slice]] = []
1166
1126 1167 def _add_to_filtered_keys(key):
1127 if isinstance(key, (str, bytes)):
1168 if isinstance(key, text_serializable_types):
1128 1169 filtered_keys.append(key)
1129 1170
1130 1171 for k in keys:
1131 1172 if isinstance(k, tuple):
1132 1173 if filter_prefix_tuple(k):
1133 1174 _add_to_filtered_keys(k[Nprefix])
1134 1175 else:
1135 1176 _add_to_filtered_keys(k)
1136 1177
1137 1178 if not prefix:
1138 1179 return '', 0, [repr(k) for k in filtered_keys]
1139 1180 quote_match = re.search('["\']', prefix)
1140 1181 assert quote_match is not None # silence mypy
1141 1182 quote = quote_match.group()
1142 1183 try:
1143 prefix_str = eval(prefix + quote, {})
1184 prefix_str = literal_eval(prefix + quote)
1144 1185 except Exception:
1145 1186 return '', 0, []
1146 1187
1147 1188 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
1148 1189 token_match = re.search(pattern, prefix, re.UNICODE)
1149 1190 assert token_match is not None # silence mypy
1150 1191 token_start = token_match.start()
1151 1192 token_prefix = token_match.group()
1152 1193
1153 1194 matched:List[str] = []
1154 1195 for key in filtered_keys:
1196 str_key = key if isinstance(key, (str, bytes)) else str(key)
1155 1197 try:
1156 if not key.startswith(prefix_str):
1198 if not str_key.startswith(prefix_str):
1157 1199 continue
1158 1200 except (AttributeError, TypeError, UnicodeError):
1159 1201 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
1160 1202 continue
1161 1203
1162 1204 # reformat remainder of key to begin with prefix
1163 rem = key[len(prefix_str):]
1205 rem = str_key[len(prefix_str):]
1164 1206 # force repr wrapped in '
1165 1207 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
1166 1208 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
1167 1209 if quote == '"':
1168 1210 # The entered prefix is quoted with ",
1169 1211 # but the match is quoted with '.
1170 1212 # A contained " hence needs escaping for comparison:
1171 1213 rem_repr = rem_repr.replace('"', '\\"')
1172 1214
1173 1215 # then reinsert prefix from start of token
1174 1216 matched.append('%s%s' % (token_prefix, rem_repr))
1175 1217 return quote, token_start, matched
1176 1218
1177 1219
1178 1220 def cursor_to_position(text:str, line:int, column:int)->int:
1179 1221 """
1180 1222 Convert the (line,column) position of the cursor in text to an offset in a
1181 1223 string.
1182 1224
1183 1225 Parameters
1184 1226 ----------
1185 1227 text : str
1186 1228 The text in which to calculate the cursor offset
1187 1229 line : int
1188 1230 Line of the cursor; 0-indexed
1189 1231 column : int
1190 1232 Column of the cursor 0-indexed
1191 1233
1192 1234 Returns
1193 1235 -------
1194 1236 Position of the cursor in ``text``, 0-indexed.
1195 1237
1196 1238 See Also
1197 1239 --------
1198 1240 position_to_cursor : reciprocal of this function
1199 1241
1200 1242 """
1201 1243 lines = text.split('\n')
1202 1244 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
1203 1245
1204 1246 return sum(len(l) + 1 for l in lines[:line]) + column
1205 1247
1206 1248 def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
1207 1249 """
1208 1250 Convert the position of the cursor in text (0 indexed) to a line
1209 1251 number(0-indexed) and a column number (0-indexed) pair
1210 1252
1211 1253 Position should be a valid position in ``text``.
1212 1254
1213 1255 Parameters
1214 1256 ----------
1215 1257 text : str
1216 1258 The text in which to calculate the cursor offset
1217 1259 offset : int
1218 1260 Position of the cursor in ``text``, 0-indexed.
1219 1261
1220 1262 Returns
1221 1263 -------
1222 1264 (line, column) : (int, int)
1223 1265 Line of the cursor; 0-indexed, column of the cursor 0-indexed
1224 1266
1225 1267 See Also
1226 1268 --------
1227 1269 cursor_to_position : reciprocal of this function
1228 1270
1229 1271 """
1230 1272
1231 1273 assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
1232 1274
1233 1275 before = text[:offset]
1234 1276 blines = before.split('\n') # ! splitnes trim trailing \n
1235 1277 line = before.count('\n')
1236 1278 col = len(blines[-1])
1237 1279 return line, col
1238 1280
1239 1281
1240 def _safe_isinstance(obj, module, class_name):
1282 def _safe_isinstance(obj, module, class_name, *attrs):
1241 1283 """Checks if obj is an instance of module.class_name if loaded
1242 1284 """
1243 return (module in sys.modules and
1244 isinstance(obj, getattr(import_module(module), class_name)))
1285 if module in sys.modules:
1286 m = sys.modules[module]
1287 for attr in [class_name, *attrs]:
1288 m = getattr(m, attr)
1289 return isinstance(obj, m)
1245 1290
1246 1291
1247 1292 @context_matcher()
1248 1293 def back_unicode_name_matcher(context: CompletionContext):
1249 1294 """Match Unicode characters back to Unicode name
1250 1295
1251 1296 Same as :any:`back_unicode_name_matches`, but adopted to new Matcher API.
1252 1297 """
1253 1298 fragment, matches = back_unicode_name_matches(context.text_until_cursor)
1254 1299 return _convert_matcher_v1_result_to_v2(
1255 1300 matches, type="unicode", fragment=fragment, suppress_if_matches=True
1256 1301 )
1257 1302
1258 1303
1259 1304 def back_unicode_name_matches(text: str) -> Tuple[str, Sequence[str]]:
1260 1305 """Match Unicode characters back to Unicode name
1261 1306
1262 1307 This does ``β˜ƒ`` -> ``\\snowman``
1263 1308
1264 1309 Note that snowman is not a valid python3 combining character but will be expanded.
1265 1310 Though it will not recombine back to the snowman character by the completion machinery.
1266 1311
1267 1312 This will not either back-complete standard sequences like \\n, \\b ...
1268 1313
1269 1314 .. deprecated:: 8.6
1270 1315 You can use :meth:`back_unicode_name_matcher` instead.
1271 1316
1272 1317 Returns
1273 1318 =======
1274 1319
1275 1320 Return a tuple with two elements:
1276 1321
1277 1322 - The Unicode character that was matched (preceded with a backslash), or
1278 1323 empty string,
1279 1324 - a sequence (of 1), name for the match Unicode character, preceded by
1280 1325 backslash, or empty if no match.
1281 1326 """
1282 1327 if len(text)<2:
1283 1328 return '', ()
1284 1329 maybe_slash = text[-2]
1285 1330 if maybe_slash != '\\':
1286 1331 return '', ()
1287 1332
1288 1333 char = text[-1]
1289 1334 # no expand on quote for completion in strings.
1290 1335 # nor backcomplete standard ascii keys
1291 1336 if char in string.ascii_letters or char in ('"',"'"):
1292 1337 return '', ()
1293 1338 try :
1294 1339 unic = unicodedata.name(char)
1295 1340 return '\\'+char,('\\'+unic,)
1296 1341 except KeyError:
1297 1342 pass
1298 1343 return '', ()
1299 1344
1300 1345
1301 1346 @context_matcher()
1302 1347 def back_latex_name_matcher(context: CompletionContext):
1303 1348 """Match latex characters back to unicode name
1304 1349
1305 1350 Same as :any:`back_latex_name_matches`, but adopted to new Matcher API.
1306 1351 """
1307 1352 fragment, matches = back_latex_name_matches(context.text_until_cursor)
1308 1353 return _convert_matcher_v1_result_to_v2(
1309 1354 matches, type="latex", fragment=fragment, suppress_if_matches=True
1310 1355 )
1311 1356
1312 1357
1313 1358 def back_latex_name_matches(text: str) -> Tuple[str, Sequence[str]]:
1314 1359 """Match latex characters back to unicode name
1315 1360
1316 1361 This does ``\\β„΅`` -> ``\\aleph``
1317 1362
1318 1363 .. deprecated:: 8.6
1319 1364 You can use :meth:`back_latex_name_matcher` instead.
1320 1365 """
1321 1366 if len(text)<2:
1322 1367 return '', ()
1323 1368 maybe_slash = text[-2]
1324 1369 if maybe_slash != '\\':
1325 1370 return '', ()
1326 1371
1327 1372
1328 1373 char = text[-1]
1329 1374 # no expand on quote for completion in strings.
1330 1375 # nor backcomplete standard ascii keys
1331 1376 if char in string.ascii_letters or char in ('"',"'"):
1332 1377 return '', ()
1333 1378 try :
1334 1379 latex = reverse_latex_symbol[char]
1335 1380 # '\\' replace the \ as well
1336 1381 return '\\'+char,[latex]
1337 1382 except KeyError:
1338 1383 pass
1339 1384 return '', ()
1340 1385
1341 1386
1342 1387 def _formatparamchildren(parameter) -> str:
1343 1388 """
1344 1389 Get parameter name and value from Jedi Private API
1345 1390
1346 1391 Jedi does not expose a simple way to get `param=value` from its API.
1347 1392
1348 1393 Parameters
1349 1394 ----------
1350 1395 parameter
1351 1396 Jedi's function `Param`
1352 1397
1353 1398 Returns
1354 1399 -------
1355 1400 A string like 'a', 'b=1', '*args', '**kwargs'
1356 1401
1357 1402 """
1358 1403 description = parameter.description
1359 1404 if not description.startswith('param '):
1360 1405 raise ValueError('Jedi function parameter description have change format.'
1361 1406 'Expected "param ...", found %r".' % description)
1362 1407 return description[6:]
1363 1408
1364 1409 def _make_signature(completion)-> str:
1365 1410 """
1366 1411 Make the signature from a jedi completion
1367 1412
1368 1413 Parameters
1369 1414 ----------
1370 1415 completion : jedi.Completion
1371 1416 object does not complete a function type
1372 1417
1373 1418 Returns
1374 1419 -------
1375 1420 a string consisting of the function signature, with the parenthesis but
1376 1421 without the function name. example:
1377 1422 `(a, *args, b=1, **kwargs)`
1378 1423
1379 1424 """
1380 1425
1381 1426 # it looks like this might work on jedi 0.17
1382 1427 if hasattr(completion, 'get_signatures'):
1383 1428 signatures = completion.get_signatures()
1384 1429 if not signatures:
1385 1430 return '(?)'
1386 1431
1387 1432 c0 = completion.get_signatures()[0]
1388 1433 return '('+c0.to_string().split('(', maxsplit=1)[1]
1389 1434
1390 1435 return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
1391 1436 for p in signature.defined_names()) if f])
1392 1437
1393 1438
1394 1439 _CompleteResult = Dict[str, MatcherResult]
1395 1440
1396 1441
1442 DICT_MATCHER_REGEX = re.compile(r"""(?x)
1443 ( # match dict-referring - or any get item object - expression
1444 .+
1445 )
1446 \[ # open bracket
1447 \s* # and optional whitespace
1448 # Capture any number of serializable objects (e.g. "a", "b", 'c')
1449 # and slices
1450 ((?:[uUbB]? # string prefix (r not handled)
1451 (?:
1452 '(?:[^']|(?<!\\)\\')*'
1453 |
1454 "(?:[^"]|(?<!\\)\\")*"
1455 |
1456 # capture integers and slices
1457 (?:[-+]?\d+)?(?::(?:[-+]?\d+)?){0,2}
1458 )
1459 \s*,\s*
1460 )*)
1461 ([uUbB]? # string prefix (r not handled)
1462 (?: # unclosed string
1463 '(?:[^']|(?<!\\)\\')*
1464 |
1465 "(?:[^"]|(?<!\\)\\")*
1466 |
1467 (?:[-+]?\d+)
1468 )
1469 )?
1470 $
1471 """)
1472
1397 1473 def _convert_matcher_v1_result_to_v2(
1398 1474 matches: Sequence[str],
1399 1475 type: str,
1400 1476 fragment: str = None,
1401 1477 suppress_if_matches: bool = False,
1402 1478 ) -> SimpleMatcherResult:
1403 1479 """Utility to help with transition"""
1404 1480 result = {
1405 1481 "completions": [SimpleCompletion(text=match, type=type) for match in matches],
1406 1482 "suppress": (True if matches else False) if suppress_if_matches else False,
1407 1483 }
1408 1484 if fragment is not None:
1409 1485 result["matched_fragment"] = fragment
1410 1486 return result
1411 1487
1412 1488
1413 1489 class IPCompleter(Completer):
1414 1490 """Extension of the completer class with IPython-specific features"""
1415 1491
1416 __dict_key_regexps: Optional[Dict[bool,Pattern]] = None
1417
1418 1492 @observe('greedy')
1419 1493 def _greedy_changed(self, change):
1420 1494 """update the splitter and readline delims when greedy is changed"""
1421 1495 if change['new']:
1496 self.evaluation = 'unsafe'
1422 1497 self.splitter.delims = GREEDY_DELIMS
1423 1498 else:
1499 self.evaluation = 'limitted'
1424 1500 self.splitter.delims = DELIMS
1425 1501
1426 1502 dict_keys_only = Bool(
1427 1503 False,
1428 1504 help="""
1429 1505 Whether to show dict key matches only.
1430 1506
1431 1507 (disables all matchers except for `IPCompleter.dict_key_matcher`).
1432 1508 """,
1433 1509 )
1434 1510
1435 1511 suppress_competing_matchers = UnionTrait(
1436 1512 [Bool(allow_none=True), DictTrait(Bool(None, allow_none=True))],
1437 1513 default_value=None,
1438 1514 help="""
1439 1515 Whether to suppress completions from other *Matchers*.
1440 1516
1441 1517 When set to ``None`` (default) the matchers will attempt to auto-detect
1442 1518 whether suppression of other matchers is desirable. For example, at
1443 1519 the beginning of a line followed by `%` we expect a magic completion
1444 1520 to be the only applicable option, and after ``my_dict['`` we usually
1445 1521 expect a completion with an existing dictionary key.
1446 1522
1447 1523 If you want to disable this heuristic and see completions from all matchers,
1448 1524 set ``IPCompleter.suppress_competing_matchers = False``.
1449 1525 To disable the heuristic for specific matchers provide a dictionary mapping:
1450 1526 ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher': False}``.
1451 1527
1452 1528 Set ``IPCompleter.suppress_competing_matchers = True`` to limit
1453 1529 completions to the set of matchers with the highest priority;
1454 1530 this is equivalent to ``IPCompleter.merge_completions`` and
1455 1531 can be beneficial for performance, but will sometimes omit relevant
1456 1532 candidates from matchers further down the priority list.
1457 1533 """,
1458 1534 ).tag(config=True)
1459 1535
1460 1536 merge_completions = Bool(
1461 1537 True,
1462 1538 help="""Whether to merge completion results into a single list
1463 1539
1464 1540 If False, only the completion results from the first non-empty
1465 1541 completer will be returned.
1466 1542
1467 1543 As of version 8.6.0, setting the value to ``False`` is an alias for:
1468 1544 ``IPCompleter.suppress_competing_matchers = True.``.
1469 1545 """,
1470 1546 ).tag(config=True)
1471 1547
1472 1548 disable_matchers = ListTrait(
1473 1549 Unicode(),
1474 1550 help="""List of matchers to disable.
1475 1551
1476 1552 The list should contain matcher identifiers (see :any:`completion_matcher`).
1477 1553 """,
1478 1554 ).tag(config=True)
1479 1555
1480 1556 omit__names = Enum(
1481 1557 (0, 1, 2),
1482 1558 default_value=2,
1483 1559 help="""Instruct the completer to omit private method names
1484 1560
1485 1561 Specifically, when completing on ``object.<tab>``.
1486 1562
1487 1563 When 2 [default]: all names that start with '_' will be excluded.
1488 1564
1489 1565 When 1: all 'magic' names (``__foo__``) will be excluded.
1490 1566
1491 1567 When 0: nothing will be excluded.
1492 1568 """
1493 1569 ).tag(config=True)
1494 1570 limit_to__all__ = Bool(False,
1495 1571 help="""
1496 1572 DEPRECATED as of version 5.0.
1497 1573
1498 1574 Instruct the completer to use __all__ for the completion
1499 1575
1500 1576 Specifically, when completing on ``object.<tab>``.
1501 1577
1502 1578 When True: only those names in obj.__all__ will be included.
1503 1579
1504 1580 When False [default]: the __all__ attribute is ignored
1505 1581 """,
1506 1582 ).tag(config=True)
1507 1583
1508 1584 profile_completions = Bool(
1509 1585 default_value=False,
1510 1586 help="If True, emit profiling data for completion subsystem using cProfile."
1511 1587 ).tag(config=True)
1512 1588
1513 1589 profiler_output_dir = Unicode(
1514 1590 default_value=".completion_profiles",
1515 1591 help="Template for path at which to output profile data for completions."
1516 1592 ).tag(config=True)
1517 1593
1518 1594 @observe('limit_to__all__')
1519 1595 def _limit_to_all_changed(self, change):
1520 1596 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
1521 1597 'value has been deprecated since IPython 5.0, will be made to have '
1522 1598 'no effects and then removed in future version of IPython.',
1523 1599 UserWarning)
1524 1600
1525 1601 def __init__(
1526 1602 self, shell=None, namespace=None, global_namespace=None, config=None, **kwargs
1527 1603 ):
1528 1604 """IPCompleter() -> completer
1529 1605
1530 1606 Return a completer object.
1531 1607
1532 1608 Parameters
1533 1609 ----------
1534 1610 shell
1535 1611 a pointer to the ipython shell itself. This is needed
1536 1612 because this completer knows about magic functions, and those can
1537 1613 only be accessed via the ipython instance.
1538 1614 namespace : dict, optional
1539 1615 an optional dict where completions are performed.
1540 1616 global_namespace : dict, optional
1541 1617 secondary optional dict for completions, to
1542 1618 handle cases (such as IPython embedded inside functions) where
1543 1619 both Python scopes are visible.
1544 1620 config : Config
1545 1621 traitlet's config object
1546 1622 **kwargs
1547 1623 passed to super class unmodified.
1548 1624 """
1549 1625
1550 1626 self.magic_escape = ESC_MAGIC
1551 1627 self.splitter = CompletionSplitter()
1552 1628
1553 1629 # _greedy_changed() depends on splitter and readline being defined:
1554 1630 super().__init__(
1555 1631 namespace=namespace,
1556 1632 global_namespace=global_namespace,
1557 1633 config=config,
1558 1634 **kwargs,
1559 1635 )
1560 1636
1561 1637 # List where completion matches will be stored
1562 1638 self.matches = []
1563 1639 self.shell = shell
1564 1640 # Regexp to split filenames with spaces in them
1565 1641 self.space_name_re = re.compile(r'([^\\] )')
1566 1642 # Hold a local ref. to glob.glob for speed
1567 1643 self.glob = glob.glob
1568 1644
1569 1645 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1570 1646 # buffers, to avoid completion problems.
1571 1647 term = os.environ.get('TERM','xterm')
1572 1648 self.dumb_terminal = term in ['dumb','emacs']
1573 1649
1574 1650 # Special handling of backslashes needed in win32 platforms
1575 1651 if sys.platform == "win32":
1576 1652 self.clean_glob = self._clean_glob_win32
1577 1653 else:
1578 1654 self.clean_glob = self._clean_glob
1579 1655
1580 1656 #regexp to parse docstring for function signature
1581 1657 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1582 1658 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1583 1659 #use this if positional argument name is also needed
1584 1660 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1585 1661
1586 1662 self.magic_arg_matchers = [
1587 1663 self.magic_config_matcher,
1588 1664 self.magic_color_matcher,
1589 1665 ]
1590 1666
1591 1667 # This is set externally by InteractiveShell
1592 1668 self.custom_completers = None
1593 1669
1594 1670 # This is a list of names of unicode characters that can be completed
1595 1671 # into their corresponding unicode value. The list is large, so we
1596 1672 # lazily initialize it on first use. Consuming code should access this
1597 1673 # attribute through the `@unicode_names` property.
1598 1674 self._unicode_names = None
1599 1675
1600 1676 self._backslash_combining_matchers = [
1601 1677 self.latex_name_matcher,
1602 1678 self.unicode_name_matcher,
1603 1679 back_latex_name_matcher,
1604 1680 back_unicode_name_matcher,
1605 1681 self.fwd_unicode_matcher,
1606 1682 ]
1607 1683
1608 1684 if not self.backslash_combining_completions:
1609 1685 for matcher in self._backslash_combining_matchers:
1610 1686 self.disable_matchers.append(matcher.matcher_identifier)
1611 1687
1612 1688 if not self.merge_completions:
1613 1689 self.suppress_competing_matchers = True
1614 1690
1615 1691 @property
1616 1692 def matchers(self) -> List[Matcher]:
1617 1693 """All active matcher routines for completion"""
1618 1694 if self.dict_keys_only:
1619 1695 return [self.dict_key_matcher]
1620 1696
1621 1697 if self.use_jedi:
1622 1698 return [
1623 1699 *self.custom_matchers,
1624 1700 *self._backslash_combining_matchers,
1625 1701 *self.magic_arg_matchers,
1626 1702 self.custom_completer_matcher,
1627 1703 self.magic_matcher,
1628 1704 self._jedi_matcher,
1629 1705 self.dict_key_matcher,
1630 1706 self.file_matcher,
1631 1707 ]
1632 1708 else:
1633 1709 return [
1634 1710 *self.custom_matchers,
1635 1711 *self._backslash_combining_matchers,
1636 1712 *self.magic_arg_matchers,
1637 1713 self.custom_completer_matcher,
1638 1714 self.dict_key_matcher,
1639 1715 # TODO: convert python_matches to v2 API
1640 1716 self.magic_matcher,
1641 1717 self.python_matches,
1642 1718 self.file_matcher,
1643 1719 self.python_func_kw_matcher,
1644 1720 ]
1645 1721
1646 1722 def all_completions(self, text:str) -> List[str]:
1647 1723 """
1648 1724 Wrapper around the completion methods for the benefit of emacs.
1649 1725 """
1650 1726 prefix = text.rpartition('.')[0]
1651 1727 with provisionalcompleter():
1652 1728 return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
1653 1729 for c in self.completions(text, len(text))]
1654 1730
1655 1731 return self.complete(text)[1]
1656 1732
1657 1733 def _clean_glob(self, text:str):
1658 1734 return self.glob("%s*" % text)
1659 1735
1660 1736 def _clean_glob_win32(self, text:str):
1661 1737 return [f.replace("\\","/")
1662 1738 for f in self.glob("%s*" % text)]
1663 1739
1664 1740 @context_matcher()
1665 1741 def file_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
1666 1742 """Same as :any:`file_matches`, but adopted to new Matcher API."""
1667 1743 matches = self.file_matches(context.token)
1668 1744 # TODO: add a heuristic for suppressing (e.g. if it has OS-specific delimiter,
1669 1745 # starts with `/home/`, `C:\`, etc)
1670 1746 return _convert_matcher_v1_result_to_v2(matches, type="path")
1671 1747
1672 1748 def file_matches(self, text: str) -> List[str]:
1673 1749 """Match filenames, expanding ~USER type strings.
1674 1750
1675 1751 Most of the seemingly convoluted logic in this completer is an
1676 1752 attempt to handle filenames with spaces in them. And yet it's not
1677 1753 quite perfect, because Python's readline doesn't expose all of the
1678 1754 GNU readline details needed for this to be done correctly.
1679 1755
1680 1756 For a filename with a space in it, the printed completions will be
1681 1757 only the parts after what's already been typed (instead of the
1682 1758 full completions, as is normally done). I don't think with the
1683 1759 current (as of Python 2.3) Python readline it's possible to do
1684 1760 better.
1685 1761
1686 1762 .. deprecated:: 8.6
1687 1763 You can use :meth:`file_matcher` instead.
1688 1764 """
1689 1765
1690 1766 # chars that require escaping with backslash - i.e. chars
1691 1767 # that readline treats incorrectly as delimiters, but we
1692 1768 # don't want to treat as delimiters in filename matching
1693 1769 # when escaped with backslash
1694 1770 if text.startswith('!'):
1695 1771 text = text[1:]
1696 1772 text_prefix = u'!'
1697 1773 else:
1698 1774 text_prefix = u''
1699 1775
1700 1776 text_until_cursor = self.text_until_cursor
1701 1777 # track strings with open quotes
1702 1778 open_quotes = has_open_quotes(text_until_cursor)
1703 1779
1704 1780 if '(' in text_until_cursor or '[' in text_until_cursor:
1705 1781 lsplit = text
1706 1782 else:
1707 1783 try:
1708 1784 # arg_split ~ shlex.split, but with unicode bugs fixed by us
1709 1785 lsplit = arg_split(text_until_cursor)[-1]
1710 1786 except ValueError:
1711 1787 # typically an unmatched ", or backslash without escaped char.
1712 1788 if open_quotes:
1713 1789 lsplit = text_until_cursor.split(open_quotes)[-1]
1714 1790 else:
1715 1791 return []
1716 1792 except IndexError:
1717 1793 # tab pressed on empty line
1718 1794 lsplit = ""
1719 1795
1720 1796 if not open_quotes and lsplit != protect_filename(lsplit):
1721 1797 # if protectables are found, do matching on the whole escaped name
1722 1798 has_protectables = True
1723 1799 text0,text = text,lsplit
1724 1800 else:
1725 1801 has_protectables = False
1726 1802 text = os.path.expanduser(text)
1727 1803
1728 1804 if text == "":
1729 1805 return [text_prefix + protect_filename(f) for f in self.glob("*")]
1730 1806
1731 1807 # Compute the matches from the filesystem
1732 1808 if sys.platform == 'win32':
1733 1809 m0 = self.clean_glob(text)
1734 1810 else:
1735 1811 m0 = self.clean_glob(text.replace('\\', ''))
1736 1812
1737 1813 if has_protectables:
1738 1814 # If we had protectables, we need to revert our changes to the
1739 1815 # beginning of filename so that we don't double-write the part
1740 1816 # of the filename we have so far
1741 1817 len_lsplit = len(lsplit)
1742 1818 matches = [text_prefix + text0 +
1743 1819 protect_filename(f[len_lsplit:]) for f in m0]
1744 1820 else:
1745 1821 if open_quotes:
1746 1822 # if we have a string with an open quote, we don't need to
1747 1823 # protect the names beyond the quote (and we _shouldn't_, as
1748 1824 # it would cause bugs when the filesystem call is made).
1749 1825 matches = m0 if sys.platform == "win32" else\
1750 1826 [protect_filename(f, open_quotes) for f in m0]
1751 1827 else:
1752 1828 matches = [text_prefix +
1753 1829 protect_filename(f) for f in m0]
1754 1830
1755 1831 # Mark directories in input list by appending '/' to their names.
1756 1832 return [x+'/' if os.path.isdir(x) else x for x in matches]
1757 1833
1758 1834 @context_matcher()
1759 1835 def magic_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
1760 1836 """Match magics."""
1761 1837 text = context.token
1762 1838 matches = self.magic_matches(text)
1763 1839 result = _convert_matcher_v1_result_to_v2(matches, type="magic")
1764 1840 is_magic_prefix = len(text) > 0 and text[0] == "%"
1765 1841 result["suppress"] = is_magic_prefix and bool(result["completions"])
1766 1842 return result
1767 1843
1768 1844 def magic_matches(self, text: str):
1769 1845 """Match magics.
1770 1846
1771 1847 .. deprecated:: 8.6
1772 1848 You can use :meth:`magic_matcher` instead.
1773 1849 """
1774 1850 # Get all shell magics now rather than statically, so magics loaded at
1775 1851 # runtime show up too.
1776 1852 lsm = self.shell.magics_manager.lsmagic()
1777 1853 line_magics = lsm['line']
1778 1854 cell_magics = lsm['cell']
1779 1855 pre = self.magic_escape
1780 1856 pre2 = pre+pre
1781 1857
1782 1858 explicit_magic = text.startswith(pre)
1783 1859
1784 1860 # Completion logic:
1785 1861 # - user gives %%: only do cell magics
1786 1862 # - user gives %: do both line and cell magics
1787 1863 # - no prefix: do both
1788 1864 # In other words, line magics are skipped if the user gives %% explicitly
1789 1865 #
1790 1866 # We also exclude magics that match any currently visible names:
1791 1867 # https://github.com/ipython/ipython/issues/4877, unless the user has
1792 1868 # typed a %:
1793 1869 # https://github.com/ipython/ipython/issues/10754
1794 1870 bare_text = text.lstrip(pre)
1795 1871 global_matches = self.global_matches(bare_text)
1796 1872 if not explicit_magic:
1797 1873 def matches(magic):
1798 1874 """
1799 1875 Filter magics, in particular remove magics that match
1800 1876 a name present in global namespace.
1801 1877 """
1802 1878 return ( magic.startswith(bare_text) and
1803 1879 magic not in global_matches )
1804 1880 else:
1805 1881 def matches(magic):
1806 1882 return magic.startswith(bare_text)
1807 1883
1808 1884 comp = [ pre2+m for m in cell_magics if matches(m)]
1809 1885 if not text.startswith(pre2):
1810 1886 comp += [ pre+m for m in line_magics if matches(m)]
1811 1887
1812 1888 return comp
1813 1889
1814 1890 @context_matcher()
1815 1891 def magic_config_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
1816 1892 """Match class names and attributes for %config magic."""
1817 1893 # NOTE: uses `line_buffer` equivalent for compatibility
1818 1894 matches = self.magic_config_matches(context.line_with_cursor)
1819 1895 return _convert_matcher_v1_result_to_v2(matches, type="param")
1820 1896
1821 1897 def magic_config_matches(self, text: str) -> List[str]:
1822 1898 """Match class names and attributes for %config magic.
1823 1899
1824 1900 .. deprecated:: 8.6
1825 1901 You can use :meth:`magic_config_matcher` instead.
1826 1902 """
1827 1903 texts = text.strip().split()
1828 1904
1829 1905 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
1830 1906 # get all configuration classes
1831 1907 classes = sorted(set([ c for c in self.shell.configurables
1832 1908 if c.__class__.class_traits(config=True)
1833 1909 ]), key=lambda x: x.__class__.__name__)
1834 1910 classnames = [ c.__class__.__name__ for c in classes ]
1835 1911
1836 1912 # return all classnames if config or %config is given
1837 1913 if len(texts) == 1:
1838 1914 return classnames
1839 1915
1840 1916 # match classname
1841 1917 classname_texts = texts[1].split('.')
1842 1918 classname = classname_texts[0]
1843 1919 classname_matches = [ c for c in classnames
1844 1920 if c.startswith(classname) ]
1845 1921
1846 1922 # return matched classes or the matched class with attributes
1847 1923 if texts[1].find('.') < 0:
1848 1924 return classname_matches
1849 1925 elif len(classname_matches) == 1 and \
1850 1926 classname_matches[0] == classname:
1851 1927 cls = classes[classnames.index(classname)].__class__
1852 1928 help = cls.class_get_help()
1853 1929 # strip leading '--' from cl-args:
1854 1930 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
1855 1931 return [ attr.split('=')[0]
1856 1932 for attr in help.strip().splitlines()
1857 1933 if attr.startswith(texts[1]) ]
1858 1934 return []
1859 1935
1860 1936 @context_matcher()
1861 1937 def magic_color_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
1862 1938 """Match color schemes for %colors magic."""
1863 1939 # NOTE: uses `line_buffer` equivalent for compatibility
1864 1940 matches = self.magic_color_matches(context.line_with_cursor)
1865 1941 return _convert_matcher_v1_result_to_v2(matches, type="param")
1866 1942
1867 1943 def magic_color_matches(self, text: str) -> List[str]:
1868 1944 """Match color schemes for %colors magic.
1869 1945
1870 1946 .. deprecated:: 8.6
1871 1947 You can use :meth:`magic_color_matcher` instead.
1872 1948 """
1873 1949 texts = text.split()
1874 1950 if text.endswith(' '):
1875 1951 # .split() strips off the trailing whitespace. Add '' back
1876 1952 # so that: '%colors ' -> ['%colors', '']
1877 1953 texts.append('')
1878 1954
1879 1955 if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
1880 1956 prefix = texts[1]
1881 1957 return [ color for color in InspectColors.keys()
1882 1958 if color.startswith(prefix) ]
1883 1959 return []
1884 1960
1885 1961 @context_matcher(identifier="IPCompleter.jedi_matcher")
1886 1962 def _jedi_matcher(self, context: CompletionContext) -> _JediMatcherResult:
1887 1963 matches = self._jedi_matches(
1888 1964 cursor_column=context.cursor_position,
1889 1965 cursor_line=context.cursor_line,
1890 1966 text=context.full_text,
1891 1967 )
1892 1968 return {
1893 1969 "completions": matches,
1894 1970 # static analysis should not suppress other matchers
1895 1971 "suppress": False,
1896 1972 }
1897 1973
    def _jedi_matches(
        self, cursor_column: int, cursor_line: int, text: str
    ) -> Iterable[_JediCompletionLike]:
        """
        Return a list of :any:`jedi.api.Completion`s object from a ``text`` and
        cursor position.

        Parameters
        ----------
        cursor_column : int
            column position of the cursor in ``text``, 0-indexed.
        cursor_line : int
            line position of the cursor in ``text``, 0-indexed
        text : str
            text to complete

        Notes
        -----
        If ``IPCompleter.debug`` is ``True`` may return a :any:`_FakeJediCompletion`
        object containing a string with the Jedi debug information attached.

        .. deprecated:: 8.6
            You can use :meth:`_jedi_matcher` instead.
        """
        # Jedi resolves names against both the local and (if distinct)
        # global namespaces.
        namespaces = [self.namespace]
        if self.global_namespace is not None:
            namespaces.append(self.global_namespace)

        completion_filter = lambda x:x
        offset = cursor_to_position(text, cursor_line, cursor_column)
        # filter output if we are completing for object members
        if offset:
            pre = text[offset-1]
            if pre == '.':
                # omit__names: 2 hides every `_name`, 1 hides only dunders,
                # 0 hides nothing; any other value is a configuration error.
                if self.omit__names == 2:
                    completion_filter = lambda c:not c.name.startswith('_')
                elif self.omit__names == 1:
                    completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
                elif self.omit__names == 0:
                    completion_filter = lambda x:x
                else:
                    raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))

        interpreter = jedi.Interpreter(text[:offset], namespaces)
        try_jedi = True

        try:
            # find the first token in the current tree -- if it is a ' or " then we are in a string
            # NOTE: `_get_module()` is jedi private API; the whole probe is
            # wrapped in a broad try/except so a jedi change cannot crash us.
            completing_string = False
            try:
                first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
            except StopIteration:
                pass
            else:
                # note the value may be ', ", or it may also be ''' or """, or
                # in some cases, """what/you/typed..., but all of these are
                # strings.
                completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}

            # if we are in a string jedi is likely not the right candidate for
            # now. Skip it.
            try_jedi = not completing_string
        except Exception as e:
            # many of things can go wrong, we are using private API just don't crash.
            if self.debug:
                print("Error detecting if completing a non-finished string :", e, '|')

        if not try_jedi:
            return []
        try:
            # jedi lines are 1-based, hence the `+ 1`.
            return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
        except Exception as e:
            # On jedi failure: surface the error as a fake completion when
            # debugging, otherwise fail silently with no matches.
            if self.debug:
                return [_FakeJediCompletion('Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\ns"""' % (e))]
            else:
                return []
1974 2050
1975 2051 def python_matches(self, text: str) -> Iterable[str]:
1976 2052 """Match attributes or global python names"""
1977 2053 if "." in text:
1978 2054 try:
1979 2055 matches = self.attr_matches(text)
1980 2056 if text.endswith('.') and self.omit__names:
1981 2057 if self.omit__names == 1:
1982 2058 # true if txt is _not_ a __ name, false otherwise:
1983 2059 no__name = (lambda txt:
1984 2060 re.match(r'.*\.__.*?__',txt) is None)
1985 2061 else:
1986 2062 # true if txt is _not_ a _ name, false otherwise:
1987 2063 no__name = (lambda txt:
1988 2064 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1989 2065 matches = filter(no__name, matches)
1990 2066 except NameError:
1991 2067 # catches <undefined attributes>.<tab>
1992 2068 matches = []
1993 2069 else:
1994 2070 matches = self.global_matches(text)
1995 2071 return matches
1996 2072
1997 2073 def _default_arguments_from_docstring(self, doc):
1998 2074 """Parse the first line of docstring for call signature.
1999 2075
2000 2076 Docstring should be of the form 'min(iterable[, key=func])\n'.
2001 2077 It can also parse cython docstring of the form
2002 2078 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
2003 2079 """
2004 2080 if doc is None:
2005 2081 return []
2006 2082
2007 2083 #care only the firstline
2008 2084 line = doc.lstrip().splitlines()[0]
2009 2085
2010 2086 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
2011 2087 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
2012 2088 sig = self.docstring_sig_re.search(line)
2013 2089 if sig is None:
2014 2090 return []
2015 2091 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
2016 2092 sig = sig.groups()[0].split(',')
2017 2093 ret = []
2018 2094 for s in sig:
2019 2095 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
2020 2096 ret += self.docstring_kwd_re.findall(s)
2021 2097 return ret
2022 2098
2023 2099 def _default_arguments(self, obj):
2024 2100 """Return the list of default arguments of obj if it is callable,
2025 2101 or empty list otherwise."""
2026 2102 call_obj = obj
2027 2103 ret = []
2028 2104 if inspect.isbuiltin(obj):
2029 2105 pass
2030 2106 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
2031 2107 if inspect.isclass(obj):
2032 2108 #for cython embedsignature=True the constructor docstring
2033 2109 #belongs to the object itself not __init__
2034 2110 ret += self._default_arguments_from_docstring(
2035 2111 getattr(obj, '__doc__', ''))
2036 2112 # for classes, check for __init__,__new__
2037 2113 call_obj = (getattr(obj, '__init__', None) or
2038 2114 getattr(obj, '__new__', None))
2039 2115 # for all others, check if they are __call__able
2040 2116 elif hasattr(obj, '__call__'):
2041 2117 call_obj = obj.__call__
2042 2118 ret += self._default_arguments_from_docstring(
2043 2119 getattr(call_obj, '__doc__', ''))
2044 2120
2045 2121 _keeps = (inspect.Parameter.KEYWORD_ONLY,
2046 2122 inspect.Parameter.POSITIONAL_OR_KEYWORD)
2047 2123
2048 2124 try:
2049 2125 sig = inspect.signature(obj)
2050 2126 ret.extend(k for k, v in sig.parameters.items() if
2051 2127 v.kind in _keeps)
2052 2128 except ValueError:
2053 2129 pass
2054 2130
2055 2131 return list(set(ret))
2056 2132
2057 2133 @context_matcher()
2058 2134 def python_func_kw_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2059 2135 """Match named parameters (kwargs) of the last open function."""
2060 2136 matches = self.python_func_kw_matches(context.token)
2061 2137 return _convert_matcher_v1_result_to_v2(matches, type="param")
2062 2138
    def python_func_kw_matches(self, text):
        """Match named parameters (kwargs) of the last open function.

        .. deprecated:: 8.6
            You can use :meth:`python_func_kw_matcher` instead.
        """

        if "." in text: # a parameter cannot be dotted
            return []
        # lazily build and cache the tokenizing regex on first use
        try: regexp = self.__funcParamsRegex
        except AttributeError:
            regexp = self.__funcParamsRegex = re.compile(r'''
                '.*?(?<!\\)' |    # single quoted strings or
                ".*?(?<!\\)" |    # double quoted strings or
                \w+          |    # identifier
                \S                # other characters
                ''', re.VERBOSE | re.DOTALL)
        # 1. find the nearest identifier that comes before an unclosed
        # parenthesis before the cursor
        # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
        tokens = regexp.findall(self.text_until_cursor)
        iterTokens = reversed(tokens); openPar = 0

        for token in iterTokens:
            if token == ')':
                openPar -= 1
            elif token == '(':
                openPar += 1
                if openPar > 0:
                    # found the last unclosed parenthesis
                    break
        else:
            return []
        # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
        # iterTokens continues right-to-left from the open parenthesis,
        # alternating identifier / '.' pairs.
        ids = []
        isId = re.compile(r'\w+$').match

        while True:
            try:
                ids.append(next(iterTokens))
                if not isId(ids[-1]):
                    ids.pop(); break
                if not next(iterTokens) == '.':
                    break
            except StopIteration:
                break

        # Find all named arguments already assigned to, as to avoid suggesting
        # them again
        usedNamedArgs = set()
        par_level = -1
        for token, next_token in zip(tokens, tokens[1:]):
            if token == '(':
                par_level += 1
            elif token == ')':
                par_level -= 1

            # only consider `name=` pairs at the top level of the call
            if par_level != 0:
                continue

            if next_token != '=':
                continue

            usedNamedArgs.add(token)

        argMatches = []
        try:
            # NOTE: evaluates the dotted callable expression in the user
            # namespace; any failure (undefined name, raising property, ...)
            # is deliberately swallowed and yields no matches.
            callableObj = '.'.join(ids[::-1])
            namedArgs = self._default_arguments(eval(callableObj,
                                                    self.namespace))

            # Remove used named arguments from the list, no need to show twice
            for namedArg in set(namedArgs) - usedNamedArgs:
                if namedArg.startswith(text):
                    argMatches.append("%s=" %namedArg)
        except:
            pass

        return argMatches
2142 2218
    @staticmethod
    def _get_keys(obj: Any) -> List[Any]:
        """Return the completable keys of ``obj``, or ``[]`` when unknown."""
        # Objects can define their own completions by defining an
        # _ipy_key_completions_() method.
        method = get_real_method(obj, '_ipython_key_completions_')
        if method is not None:
            return method()

        # Special case some common in-memory dict-like types
        if (isinstance(obj, dict) or
            _safe_isinstance(obj, 'pandas', 'DataFrame')):
            try:
                return list(obj.keys())
            except Exception:
                # a broken keys() implementation should not abort completion
                return []
        elif _safe_isinstance(obj, 'pandas', 'core', 'indexing', '_LocIndexer'):
            # `.loc` indexer: complete the keys of the wrapped object
            try:
                return list(obj.obj.keys())
            except Exception:
                return []
        elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
             _safe_isinstance(obj, 'numpy', 'void'):
            # structured arrays expose their field names via the dtype
            return obj.dtype.names or []
        return []
2162 2243
2163 2244 @context_matcher()
2164 2245 def dict_key_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2165 2246 """Match string keys in a dictionary, after e.g. ``foo[``."""
2166 2247 matches = self.dict_key_matches(context.token)
2167 2248 return _convert_matcher_v1_result_to_v2(
2168 2249 matches, type="dict key", suppress_if_matches=True
2169 2250 )
2170 2251
    def dict_key_matches(self, text: str) -> List[str]:
        """Match string keys in a dictionary, after e.g. ``foo[``.

        .. deprecated:: 8.6
            You can use :meth:`dict_key_matcher` instead.
        """

        # Short-circuit on closed dictionary (regular expression would
        # not match anyway, but would take quite a while).
        if self.text_until_cursor.strip().endswith(']'):
            return []

        match = DICT_MATCHER_REGEX.search(self.text_until_cursor)

        if match is None:
            return []

        # groups: the subscripted expression, any already-typed tuple keys
        # (e.g. `"a", ` in `d["a", "b`), and the partial key being typed
        expr, prior_tuple_keys, key_prefix = match.groups()

        obj = self._evaluate_expr(expr)

        # sentinel meaning the expression could not (or was not allowed to)
        # be evaluated
        if obj is not_found:
            return []

        keys = self._get_keys(obj)
        if not keys:
            return keys

        # NOTE(review): evaluates the already-typed tuple keys under the
        # configured guarded-evaluation policy rather than bare eval();
        # `in_subscript=True` presumably relaxes the policy for subscript
        # context — confirm against guarded_eval's documentation.
        tuple_prefix = guarded_eval(
            prior_tuple_keys,
            EvaluationContext(
                globals_=self.global_namespace,
                locals_=self.namespace,
                evaluation=self.evaluation,
                in_subscript=True
            )
        )

        closing_quote, token_offset, matches = match_dict_keys(
            keys,
            key_prefix,
            self.splitter.delims,
            extra_prefix=tuple_prefix
        )
        if not matches:
            return matches

        # get the cursor position of
        # - the text being completed
        # - the start of the key text
        # - the start of the completion
        text_start = len(self.text_until_cursor) - len(text)
        if key_prefix:
            key_start = match.start(3)
            completion_start = key_start + token_offset
        else:
            key_start = completion_start = match.end()

        # grab the leading prefix, to make sure all completions start with `text`
        if text_start > key_start:
            leading = ''
        else:
            leading = text[text_start:completion_start]

        # the index of the `[` character
        bracket_idx = match.end(1)

        # append closing quote and bracket as appropriate
        # this is *not* appropriate if the opening quote or bracket is outside
        # the text given to this method
        suf = ''
        continuation = self.line_buffer[len(self.text_until_cursor):]
        if key_start > text_start and closing_quote:
            # quotes were opened inside text, maybe close them
            if continuation.startswith(closing_quote):
                continuation = continuation[len(closing_quote):]
            else:
                suf += closing_quote
        if bracket_idx > text_start:
            # brackets were opened inside text, maybe close them
            if not continuation.startswith(']'):
                suf += ']'

        return [leading + k + suf for k in matches]
2277 2336
2278 2337 @context_matcher()
2279 2338 def unicode_name_matcher(self, context: CompletionContext):
2280 2339 """Same as :any:`unicode_name_matches`, but adopted to new Matcher API."""
2281 2340 fragment, matches = self.unicode_name_matches(context.text_until_cursor)
2282 2341 return _convert_matcher_v1_result_to_v2(
2283 2342 matches, type="unicode", fragment=fragment, suppress_if_matches=True
2284 2343 )
2285 2344
2286 2345 @staticmethod
2287 2346 def unicode_name_matches(text: str) -> Tuple[str, List[str]]:
2288 2347 """Match Latex-like syntax for unicode characters base
2289 2348 on the name of the character.
2290 2349
2291 2350 This does ``\\GREEK SMALL LETTER ETA`` -> ``Ξ·``
2292 2351
2293 2352 Works only on valid python 3 identifier, or on combining characters that
2294 2353 will combine to form a valid identifier.
2295 2354 """
2296 2355 slashpos = text.rfind('\\')
2297 2356 if slashpos > -1:
2298 2357 s = text[slashpos+1:]
2299 2358 try :
2300 2359 unic = unicodedata.lookup(s)
2301 2360 # allow combining chars
2302 2361 if ('a'+unic).isidentifier():
2303 2362 return '\\'+s,[unic]
2304 2363 except KeyError:
2305 2364 pass
2306 2365 return '', []
2307 2366
2308 2367 @context_matcher()
2309 2368 def latex_name_matcher(self, context: CompletionContext):
2310 2369 """Match Latex syntax for unicode characters.
2311 2370
2312 2371 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
2313 2372 """
2314 2373 fragment, matches = self.latex_matches(context.text_until_cursor)
2315 2374 return _convert_matcher_v1_result_to_v2(
2316 2375 matches, type="latex", fragment=fragment, suppress_if_matches=True
2317 2376 )
2318 2377
2319 2378 def latex_matches(self, text: str) -> Tuple[str, Sequence[str]]:
2320 2379 """Match Latex syntax for unicode characters.
2321 2380
2322 2381 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
2323 2382
2324 2383 .. deprecated:: 8.6
2325 2384 You can use :meth:`latex_name_matcher` instead.
2326 2385 """
2327 2386 slashpos = text.rfind('\\')
2328 2387 if slashpos > -1:
2329 2388 s = text[slashpos:]
2330 2389 if s in latex_symbols:
2331 2390 # Try to complete a full latex symbol to unicode
2332 2391 # \\alpha -> Ξ±
2333 2392 return s, [latex_symbols[s]]
2334 2393 else:
2335 2394 # If a user has partially typed a latex symbol, give them
2336 2395 # a full list of options \al -> [\aleph, \alpha]
2337 2396 matches = [k for k in latex_symbols if k.startswith(s)]
2338 2397 if matches:
2339 2398 return s, matches
2340 2399 return '', ()
2341 2400
2342 2401 @context_matcher()
2343 2402 def custom_completer_matcher(self, context):
2344 2403 """Dispatch custom completer.
2345 2404
2346 2405 If a match is found, suppresses all other matchers except for Jedi.
2347 2406 """
2348 2407 matches = self.dispatch_custom_completer(context.token) or []
2349 2408 result = _convert_matcher_v1_result_to_v2(
2350 2409 matches, type=_UNKNOWN_TYPE, suppress_if_matches=True
2351 2410 )
2352 2411 result["ordered"] = True
2353 2412 result["do_not_suppress"] = {_get_matcher_id(self._jedi_matcher)}
2354 2413 return result
2355 2414
    def dispatch_custom_completer(self, text):
        """Dispatch the current line to the registered custom completers.

        Returns the first non-empty list of matches produced by a completer,
        or ``None`` when no completer produced any.

        .. deprecated:: 8.6
            You can use :meth:`custom_completer_matcher` instead.
        """
        if not self.custom_completers:
            return

        line = self.line_buffer
        if not line.strip():
            return None

        # Create a little structure to pass all the relevant information about
        # the current completion to any custom completer.
        event = SimpleNamespace()
        event.line = line
        event.symbol = text
        cmd = line.split(None,1)[0]
        event.command = cmd
        event.text_until_cursor = self.text_until_cursor

        # for foo etc, try also to find completer for %foo
        if not cmd.startswith(self.magic_escape):
            try_magic = self.custom_completers.s_matches(
                self.magic_escape + cmd)
        else:
            try_magic = []

        # exact command matches first, then the magic-escaped variant, then
        # the pattern-based (flat) completers
        for c in itertools.chain(self.custom_completers.s_matches(cmd),
                 try_magic,
                 self.custom_completers.flat_matches(self.text_until_cursor)):
            try:
                res = c(event)
                if res:
                    # first, try case sensitive match
                    withcase = [r for r in res if r.startswith(text)]
                    if withcase:
                        return withcase
                    # if none, then case insensitive ones are ok too
                    text_low = text.lower()
                    return [r for r in res if r.lower().startswith(text_low)]
            except TryNext:
                # completer declined; fall through to the next one
                pass
            except KeyboardInterrupt:
                """
                If custom completer take too long,
                let keyboard interrupt abort and return nothing.
                """
                break

        return None
2407 2466
    def completions(self, text: str, offset: int)->Iterator[Completion]:
        """
        Returns an iterator over the possible completions

        .. warning::

            Unstable

            This function is unstable, API may change without warning.
            It will also raise unless use in proper context manager.

        Parameters
        ----------
        text : str
            Full text of the current input, multi line string.
        offset : int
            Integer representing the position of the cursor in ``text``. Offset
            is 0-based indexed.

        Yields
        ------
        Completion

        Notes
        -----
        The cursor on a text can either be seen as being "in between"
        characters or "On" a character depending on the interface visible to
        the user. For consistency the cursor being on "in between" characters X
        and Y is equivalent to the cursor being "on" character Y, that is to say
        the character the cursor is on is considered as being after the cursor.

        Combining characters may span more that one position in the
        text.

        .. note::

            If ``IPCompleter.debug`` is :any:`True` will yield a ``--jedi/ipython--``
            fake Completion token to distinguish completion returned by Jedi
            and usual IPython completion.

        .. note::

            Completions are not completely deduplicated yet. If identical
            completions are coming from different sources this function does not
            ensure that each completion object will only be present once.
        """
        warnings.warn("_complete is a provisional API (as of IPython 6.0). "
                      "It may change without warnings. "
                      "Use in corresponding context manager.",
                      category=ProvisionalCompleterWarning, stacklevel=2)

        # best-effort de-duplication across sources (see note above in the
        # docstring about its limits)
        seen = set()
        profiler:Optional[cProfile.Profile]
        try:
            if self.profile_completions:
                # imported lazily so profiling costs nothing when disabled
                import cProfile
                profiler = cProfile.Profile()
                profiler.enable()
            else:
                profiler = None

            # jedi_compute_type_timeout is in milliseconds; _timeout in seconds
            for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
                if c and (c in seen):
                    continue
                yield c
                seen.add(c)
        except KeyboardInterrupt:
            """if completions take too long and users send keyboard interrupt,
            do not crash and return ASAP. """
            pass
        finally:
            # always flush profiling data, even on interrupt or generator close
            if profiler is not None:
                profiler.disable()
                ensure_dir_exists(self.profiler_output_dir)
                output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
                print("Writing profiler output to", output_path)
                profiler.dump_stats(output_path)
2485 2544
2486 2545 def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
2487 2546 """
2488 2547 Core completion module.Same signature as :any:`completions`, with the
2489 2548 extra `timeout` parameter (in seconds).
2490 2549
2491 2550 Computing jedi's completion ``.type`` can be quite expensive (it is a
2492 2551 lazy property) and can require some warm-up, more warm up than just
2493 2552 computing the ``name`` of a completion. The warm-up can be :
2494 2553
2495 2554 - Long warm-up the first time a module is encountered after
2496 2555 install/update: actually build parse/inference tree.
2497 2556
2498 2557 - first time the module is encountered in a session: load tree from
2499 2558 disk.
2500 2559
2501 2560 We don't want to block completions for tens of seconds so we give the
2502 2561 completer a "budget" of ``_timeout`` seconds per invocation to compute
2503 2562 completions types, the completions that have not yet been computed will
2504 2563 be marked as "unknown" an will have a chance to be computed next round
2505 2564 are things get cached.
2506 2565
2507 2566 Keep in mind that Jedi is not the only thing treating the completion so
2508 2567 keep the timeout short-ish as if we take more than 0.3 second we still
2509 2568 have lots of processing to do.
2510 2569
2511 2570 """
2512 2571 deadline = time.monotonic() + _timeout
2513 2572
2514 2573 before = full_text[:offset]
2515 2574 cursor_line, cursor_column = position_to_cursor(full_text, offset)
2516 2575
2517 2576 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2518 2577
2519 2578 results = self._complete(
2520 2579 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column
2521 2580 )
2522 2581 non_jedi_results: Dict[str, SimpleMatcherResult] = {
2523 2582 identifier: result
2524 2583 for identifier, result in results.items()
2525 2584 if identifier != jedi_matcher_id
2526 2585 }
2527 2586
2528 2587 jedi_matches = (
2529 2588 cast(results[jedi_matcher_id], _JediMatcherResult)["completions"]
2530 2589 if jedi_matcher_id in results
2531 2590 else ()
2532 2591 )
2533 2592
2534 2593 iter_jm = iter(jedi_matches)
2535 2594 if _timeout:
2536 2595 for jm in iter_jm:
2537 2596 try:
2538 2597 type_ = jm.type
2539 2598 except Exception:
2540 2599 if self.debug:
2541 2600 print("Error in Jedi getting type of ", jm)
2542 2601 type_ = None
2543 2602 delta = len(jm.name_with_symbols) - len(jm.complete)
2544 2603 if type_ == 'function':
2545 2604 signature = _make_signature(jm)
2546 2605 else:
2547 2606 signature = ''
2548 2607 yield Completion(start=offset - delta,
2549 2608 end=offset,
2550 2609 text=jm.name_with_symbols,
2551 2610 type=type_,
2552 2611 signature=signature,
2553 2612 _origin='jedi')
2554 2613
2555 2614 if time.monotonic() > deadline:
2556 2615 break
2557 2616
2558 2617 for jm in iter_jm:
2559 2618 delta = len(jm.name_with_symbols) - len(jm.complete)
2560 2619 yield Completion(
2561 2620 start=offset - delta,
2562 2621 end=offset,
2563 2622 text=jm.name_with_symbols,
2564 2623 type=_UNKNOWN_TYPE, # don't compute type for speed
2565 2624 _origin="jedi",
2566 2625 signature="",
2567 2626 )
2568 2627
2569 2628 # TODO:
2570 2629 # Suppress this, right now just for debug.
2571 2630 if jedi_matches and non_jedi_results and self.debug:
2572 2631 some_start_offset = before.rfind(
2573 2632 next(iter(non_jedi_results.values()))["matched_fragment"]
2574 2633 )
2575 2634 yield Completion(
2576 2635 start=some_start_offset,
2577 2636 end=offset,
2578 2637 text="--jedi/ipython--",
2579 2638 _origin="debug",
2580 2639 type="none",
2581 2640 signature="",
2582 2641 )
2583 2642
2584 2643 ordered = []
2585 2644 sortable = []
2586 2645
2587 2646 for origin, result in non_jedi_results.items():
2588 2647 matched_text = result["matched_fragment"]
2589 2648 start_offset = before.rfind(matched_text)
2590 2649 is_ordered = result.get("ordered", False)
2591 2650 container = ordered if is_ordered else sortable
2592 2651
2593 2652 # I'm unsure if this is always true, so let's assert and see if it
2594 2653 # crash
2595 2654 assert before.endswith(matched_text)
2596 2655
2597 2656 for simple_completion in result["completions"]:
2598 2657 completion = Completion(
2599 2658 start=start_offset,
2600 2659 end=offset,
2601 2660 text=simple_completion.text,
2602 2661 _origin=origin,
2603 2662 signature="",
2604 2663 type=simple_completion.type or _UNKNOWN_TYPE,
2605 2664 )
2606 2665 container.append(completion)
2607 2666
2608 2667 yield from list(self._deduplicate(ordered + self._sort(sortable)))[
2609 2668 :MATCHES_LIMIT
2610 2669 ]
2611 2670
2612 2671 def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
2613 2672 """Find completions for the given text and line context.
2614 2673
2615 2674 Note that both the text and the line_buffer are optional, but at least
2616 2675 one of them must be given.
2617 2676
2618 2677 Parameters
2619 2678 ----------
2620 2679 text : string, optional
2621 2680 Text to perform the completion on. If not given, the line buffer
2622 2681 is split using the instance's CompletionSplitter object.
2623 2682 line_buffer : string, optional
2624 2683 If not given, the completer attempts to obtain the current line
2625 2684 buffer via readline. This keyword allows clients which are
2626 2685 requesting for text completions in non-readline contexts to inform
2627 2686 the completer of the entire text.
2628 2687 cursor_pos : int, optional
2629 2688 Index of the cursor in the full line buffer. Should be provided by
2630 2689 remote frontends where kernel has no access to frontend state.
2631 2690
2632 2691 Returns
2633 2692 -------
2634 2693 Tuple of two items:
2635 2694 text : str
2636 2695 Text that was actually used in the completion.
2637 2696 matches : list
2638 2697 A list of completion matches.
2639 2698
2640 2699 Notes
2641 2700 -----
2642 2701 This API is likely to be deprecated and replaced by
2643 2702 :any:`IPCompleter.completions` in the future.
2644 2703
2645 2704 """
2646 2705 warnings.warn('`Completer.complete` is pending deprecation since '
2647 2706 'IPython 6.0 and will be replaced by `Completer.completions`.',
2648 2707 PendingDeprecationWarning)
2649 2708 # potential todo, FOLD the 3rd throw away argument of _complete
2650 2709 # into the first 2 one.
2651 2710 # TODO: Q: does the above refer to jedi completions (i.e. 0-indexed?)
2652 2711 # TODO: should we deprecate now, or does it stay?
2653 2712
2654 2713 results = self._complete(
2655 2714 line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0
2656 2715 )
2657 2716
2658 2717 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2659 2718
2660 2719 return self._arrange_and_extract(
2661 2720 results,
2662 2721 # TODO: can we confirm that excluding Jedi here was a deliberate choice in previous version?
2663 2722 skip_matchers={jedi_matcher_id},
2664 2723 # this API does not support different start/end positions (fragments of token).
2665 2724 abort_if_offset_changes=True,
2666 2725 )
2667 2726
2668 2727 def _arrange_and_extract(
2669 2728 self,
2670 2729 results: Dict[str, MatcherResult],
2671 2730 skip_matchers: Set[str],
2672 2731 abort_if_offset_changes: bool,
2673 2732 ):
2674 2733
2675 2734 sortable = []
2676 2735 ordered = []
2677 2736 most_recent_fragment = None
2678 2737 for identifier, result in results.items():
2679 2738 if identifier in skip_matchers:
2680 2739 continue
2681 2740 if not result["completions"]:
2682 2741 continue
2683 2742 if not most_recent_fragment:
2684 2743 most_recent_fragment = result["matched_fragment"]
2685 2744 if (
2686 2745 abort_if_offset_changes
2687 2746 and result["matched_fragment"] != most_recent_fragment
2688 2747 ):
2689 2748 break
2690 2749 if result.get("ordered", False):
2691 2750 ordered.extend(result["completions"])
2692 2751 else:
2693 2752 sortable.extend(result["completions"])
2694 2753
2695 2754 if not most_recent_fragment:
2696 2755 most_recent_fragment = "" # to satisfy typechecker (and just in case)
2697 2756
2698 2757 return most_recent_fragment, [
2699 2758 m.text for m in self._deduplicate(ordered + self._sort(sortable))
2700 2759 ]
2701 2760
2702 2761 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
2703 2762 full_text=None) -> _CompleteResult:
2704 2763 """
2705 2764 Like complete but can also returns raw jedi completions as well as the
2706 2765 origin of the completion text. This could (and should) be made much
2707 2766 cleaner but that will be simpler once we drop the old (and stateful)
2708 2767 :any:`complete` API.
2709 2768
2710 2769 With current provisional API, cursor_pos act both (depending on the
2711 2770 caller) as the offset in the ``text`` or ``line_buffer``, or as the
2712 2771 ``column`` when passing multiline strings this could/should be renamed
2713 2772 but would add extra noise.
2714 2773
2715 2774 Parameters
2716 2775 ----------
2717 2776 cursor_line
2718 2777 Index of the line the cursor is on. 0 indexed.
2719 2778 cursor_pos
2720 2779 Position of the cursor in the current line/line_buffer/text. 0
2721 2780 indexed.
2722 2781 line_buffer : optional, str
2723 2782 The current line the cursor is in, this is mostly due to legacy
2724 2783 reasons that readline could only give us the single current line.
2725 2784 Prefer `full_text`.
2726 2785 text : str
2727 2786 The current "token" the cursor is in, mostly also for historical
2728 2787 reasons, as the completer would trigger only after the current line
2729 2788 was parsed.
2730 2789 full_text : str
2731 2790 Full text of the current cell.
2732 2791
2733 2792 Returns
2734 2793 -------
2735 2794 An ordered dictionary where keys are identifiers of completion
2736 2795 matchers and values are ``MatcherResult``s.
2737 2796 """
2738 2797
2739 2798 # if the cursor position isn't given, the only sane assumption we can
2740 2799 # make is that it's at the end of the line (the common case)
2741 2800 if cursor_pos is None:
2742 2801 cursor_pos = len(line_buffer) if text is None else len(text)
2743 2802
2744 2803 if self.use_main_ns:
2745 2804 self.namespace = __main__.__dict__
2746 2805
2747 2806 # if text is either None or an empty string, rely on the line buffer
2748 2807 if (not line_buffer) and full_text:
2749 2808 line_buffer = full_text.split('\n')[cursor_line]
2750 2809 if not text: # issue #11508: check line_buffer before calling split_line
2751 2810 text = (
2752 2811 self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ""
2753 2812 )
2754 2813
2755 2814 # If no line buffer is given, assume the input text is all there was
2756 2815 if line_buffer is None:
2757 2816 line_buffer = text
2758 2817
2759 2818 # deprecated - do not use `line_buffer` in new code.
2760 2819 self.line_buffer = line_buffer
2761 2820 self.text_until_cursor = self.line_buffer[:cursor_pos]
2762 2821
2763 2822 if not full_text:
2764 2823 full_text = line_buffer
2765 2824
2766 2825 context = CompletionContext(
2767 2826 full_text=full_text,
2768 2827 cursor_position=cursor_pos,
2769 2828 cursor_line=cursor_line,
2770 2829 token=text,
2771 2830 limit=MATCHES_LIMIT,
2772 2831 )
2773 2832
2774 2833 # Start with a clean slate of completions
2775 2834 results = {}
2776 2835
2777 2836 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2778 2837
2779 2838 suppressed_matchers = set()
2780 2839
2781 2840 matchers = {
2782 2841 _get_matcher_id(matcher): matcher
2783 2842 for matcher in sorted(
2784 2843 self.matchers, key=_get_matcher_priority, reverse=True
2785 2844 )
2786 2845 }
2787 2846
2788 2847 for matcher_id, matcher in matchers.items():
2789 2848 api_version = _get_matcher_api_version(matcher)
2790 2849 matcher_id = _get_matcher_id(matcher)
2791 2850
2792 2851 if matcher_id in self.disable_matchers:
2793 2852 continue
2794 2853
2795 2854 if matcher_id in results:
2796 2855 warnings.warn(f"Duplicate matcher ID: {matcher_id}.")
2797 2856
2798 2857 if matcher_id in suppressed_matchers:
2799 2858 continue
2800 2859
2801 2860 try:
2802 2861 if api_version == 1:
2803 2862 result = _convert_matcher_v1_result_to_v2(
2804 2863 matcher(text), type=_UNKNOWN_TYPE
2805 2864 )
2806 2865 elif api_version == 2:
2807 2866 result = cast(matcher, MatcherAPIv2)(context)
2808 2867 else:
2809 2868 raise ValueError(f"Unsupported API version {api_version}")
2810 2869 except:
2811 2870 # Show the ugly traceback if the matcher causes an
2812 2871 # exception, but do NOT crash the kernel!
2813 2872 sys.excepthook(*sys.exc_info())
2814 2873 continue
2815 2874
2816 2875 # set default value for matched fragment if suffix was not selected.
2817 2876 result["matched_fragment"] = result.get("matched_fragment", context.token)
2818 2877
2819 2878 if not suppressed_matchers:
2820 2879 suppression_recommended = result.get("suppress", False)
2821 2880
2822 2881 suppression_config = (
2823 2882 self.suppress_competing_matchers.get(matcher_id, None)
2824 2883 if isinstance(self.suppress_competing_matchers, dict)
2825 2884 else self.suppress_competing_matchers
2826 2885 )
2827 2886 should_suppress = (
2828 2887 (suppression_config is True)
2829 2888 or (suppression_recommended and (suppression_config is not False))
2830 2889 ) and has_any_completions(result)
2831 2890
2832 2891 if should_suppress:
2833 2892 suppression_exceptions = result.get("do_not_suppress", set())
2834 2893 try:
2835 2894 to_suppress = set(suppression_recommended)
2836 2895 except TypeError:
2837 2896 to_suppress = set(matchers)
2838 2897 suppressed_matchers = to_suppress - suppression_exceptions
2839 2898
2840 2899 new_results = {}
2841 2900 for previous_matcher_id, previous_result in results.items():
2842 2901 if previous_matcher_id not in suppressed_matchers:
2843 2902 new_results[previous_matcher_id] = previous_result
2844 2903 results = new_results
2845 2904
2846 2905 results[matcher_id] = result
2847 2906
2848 2907 _, matches = self._arrange_and_extract(
2849 2908 results,
2850 2909 # TODO Jedi completions not included in legacy stateful API; was this deliberate or an omission?
2851 2910 # if it was omission, we can remove the filtering step, otherwise remove this comment.
2852 2911 skip_matchers={jedi_matcher_id},
2853 2912 abort_if_offset_changes=False,
2854 2913 )
2855 2914
2856 2915 # populate legacy stateful API
2857 2916 self.matches = matches
2858 2917
2859 2918 return results
2860 2919
2861 2920 @staticmethod
2862 2921 def _deduplicate(
2863 2922 matches: Sequence[SimpleCompletion],
2864 2923 ) -> Iterable[SimpleCompletion]:
2865 2924 filtered_matches = {}
2866 2925 for match in matches:
2867 2926 text = match.text
2868 2927 if (
2869 2928 text not in filtered_matches
2870 2929 or filtered_matches[text].type == _UNKNOWN_TYPE
2871 2930 ):
2872 2931 filtered_matches[text] = match
2873 2932
2874 2933 return filtered_matches.values()
2875 2934
2876 2935 @staticmethod
2877 2936 def _sort(matches: Sequence[SimpleCompletion]):
2878 2937 return sorted(matches, key=lambda x: completions_sorting_key(x.text))
2879 2938
2880 2939 @context_matcher()
2881 2940 def fwd_unicode_matcher(self, context: CompletionContext):
2882 2941 """Same as :any:`fwd_unicode_match`, but adopted to new Matcher API."""
2883 2942 # TODO: use `context.limit` to terminate early once we matched the maximum
2884 2943 # number that will be used downstream; can be added as an optional to
2885 2944 # `fwd_unicode_match(text: str, limit: int = None)` or we could re-implement here.
2886 2945 fragment, matches = self.fwd_unicode_match(context.text_until_cursor)
2887 2946 return _convert_matcher_v1_result_to_v2(
2888 2947 matches, type="unicode", fragment=fragment, suppress_if_matches=True
2889 2948 )
2890 2949
2891 2950 def fwd_unicode_match(self, text: str) -> Tuple[str, Sequence[str]]:
2892 2951 """
2893 2952 Forward match a string starting with a backslash with a list of
2894 2953 potential Unicode completions.
2895 2954
2896 2955 Will compute list of Unicode character names on first call and cache it.
2897 2956
2898 2957 .. deprecated:: 8.6
2899 2958 You can use :meth:`fwd_unicode_matcher` instead.
2900 2959
2901 2960 Returns
2902 2961 -------
2903 2962 A tuple with:
2904 2963 - matched text (empty if no matches)
2905 2964 - list of potential completions (empty tuple otherwise)
2906 2965 """
2907 2966 # TODO: self.unicode_names is here a list we traverse each time with ~100k elements.
2908 2967 # We could do a faster match using a Trie.
2909 2968
2910 2969 # Using pygtrie the following seem to work:
2911 2970
2912 2971 # s = PrefixSet()
2913 2972
2914 2973 # for c in range(0,0x10FFFF + 1):
2915 2974 # try:
2916 2975 # s.add(unicodedata.name(chr(c)))
2917 2976 # except ValueError:
2918 2977 # pass
2919 2978 # [''.join(k) for k in s.iter(prefix)]
2920 2979
2921 2980 # But need to be timed and adds an extra dependency.
2922 2981
2923 2982 slashpos = text.rfind('\\')
2924 2983 # if text starts with slash
2925 2984 if slashpos > -1:
2926 2985 # PERF: It's important that we don't access self._unicode_names
2927 2986 # until we're inside this if-block. _unicode_names is lazily
2928 2987 # initialized, and it takes a user-noticeable amount of time to
2929 2988 # initialize it, so we don't want to initialize it unless we're
2930 2989 # actually going to use it.
2931 2990 s = text[slashpos + 1 :]
2932 2991 sup = s.upper()
2933 2992 candidates = [x for x in self.unicode_names if x.startswith(sup)]
2934 2993 if candidates:
2935 2994 return s, candidates
2936 2995 candidates = [x for x in self.unicode_names if sup in x]
2937 2996 if candidates:
2938 2997 return s, candidates
2939 2998 splitsup = sup.split(" ")
2940 2999 candidates = [
2941 3000 x for x in self.unicode_names if all(u in x for u in splitsup)
2942 3001 ]
2943 3002 if candidates:
2944 3003 return s, candidates
2945 3004
2946 3005 return "", ()
2947 3006
2948 3007 # if text does not start with slash
2949 3008 else:
2950 3009 return '', ()
2951 3010
2952 3011 @property
2953 3012 def unicode_names(self) -> List[str]:
2954 3013 """List of names of unicode code points that can be completed.
2955 3014
2956 3015 The list is lazily initialized on first access.
2957 3016 """
2958 3017 if self._unicode_names is None:
2959 3018 names = []
2960 3019 for c in range(0,0x10FFFF + 1):
2961 3020 try:
2962 3021 names.append(unicodedata.name(chr(c)))
2963 3022 except ValueError:
2964 3023 pass
2965 3024 self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
2966 3025
2967 3026 return self._unicode_names
2968 3027
2969 3028 def _unicode_name_compute(ranges:List[Tuple[int,int]]) -> List[str]:
2970 3029 names = []
2971 3030 for start,stop in ranges:
2972 3031 for c in range(start, stop) :
2973 3032 try:
2974 3033 names.append(unicodedata.name(chr(c)))
2975 3034 except ValueError:
2976 3035 pass
2977 3036 return names
@@ -1,1505 +1,1547 b''
1 1 # encoding: utf-8
2 2 """Tests for the IPython tab-completion machinery."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7 import os
8 8 import pytest
9 9 import sys
10 10 import textwrap
11 11 import unittest
12 12
13 13 from contextlib import contextmanager
14 14
15 15 from traitlets.config.loader import Config
16 16 from IPython import get_ipython
17 17 from IPython.core import completer
18 18 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
19 19 from IPython.utils.generics import complete_object
20 20 from IPython.testing import decorators as dec
21 21
22 22 from IPython.core.completer import (
23 23 Completion,
24 24 provisionalcompleter,
25 25 match_dict_keys,
26 26 _deduplicate_completions,
27 27 completion_matcher,
28 28 SimpleCompletion,
29 29 CompletionContext,
30 30 )
31 31
32 32 # -----------------------------------------------------------------------------
33 33 # Test functions
34 34 # -----------------------------------------------------------------------------
35 35
36 36 def recompute_unicode_ranges():
37 37 """
38 38 utility to recompute the largest unicode range without any characters
39 39
40 40 used to recompute the gap in the global _UNICODE_RANGES of completer.py
41 41 """
42 42 import itertools
43 43 import unicodedata
44 44 valid = []
45 45 for c in range(0,0x10FFFF + 1):
46 46 try:
47 47 unicodedata.name(chr(c))
48 48 except ValueError:
49 49 continue
50 50 valid.append(c)
51 51
52 52 def ranges(i):
53 53 for a, b in itertools.groupby(enumerate(i), lambda pair: pair[1] - pair[0]):
54 54 b = list(b)
55 55 yield b[0][1], b[-1][1]
56 56
57 57 rg = list(ranges(valid))
58 58 lens = []
59 59 gap_lens = []
60 60 pstart, pstop = 0,0
61 61 for start, stop in rg:
62 62 lens.append(stop-start)
63 63 gap_lens.append((start - pstop, hex(pstop), hex(start), f'{round((start - pstop)/0xe01f0*100)}%'))
64 64 pstart, pstop = start, stop
65 65
66 66 return sorted(gap_lens)[-1]
67 67
68 68
69 69
70 70 def test_unicode_range():
71 71 """
72 72 Test that the ranges we test for unicode names give the same number of
73 73 results as testing the full length.
74 74 """
75 75 from IPython.core.completer import _unicode_name_compute, _UNICODE_RANGES
76 76
77 77 expected_list = _unicode_name_compute([(0, 0x110000)])
78 78 test = _unicode_name_compute(_UNICODE_RANGES)
79 79 len_exp = len(expected_list)
80 80 len_test = len(test)
81 81
82 82 # do not inline the len() or on error pytest will try to print the 130 000 +
83 83 # elements.
84 84 message = None
85 85 if len_exp != len_test or len_exp > 131808:
86 86 size, start, stop, prct = recompute_unicode_ranges()
87 87 message = f"""_UNICODE_RANGES likely wrong and need updating. This is
88 88 likely due to a new release of Python. We've find that the biggest gap
89 89 in unicode characters has reduces in size to be {size} characters
90 90 ({prct}), from {start}, to {stop}. In completer.py likely update to
91 91
92 92 _UNICODE_RANGES = [(32, {start}), ({stop}, 0xe01f0)]
93 93
94 94 And update the assertion below to use
95 95
96 96 len_exp <= {len_exp}
97 97 """
98 98 assert len_exp == len_test, message
99 99
100 100 # fail if new unicode symbols have been added.
101 101 assert len_exp <= 138552, message
102 102
103 103
104 104 @contextmanager
105 105 def greedy_completion():
106 106 ip = get_ipython()
107 107 greedy_original = ip.Completer.greedy
108 108 try:
109 109 ip.Completer.greedy = True
110 110 yield
111 111 finally:
112 112 ip.Completer.greedy = greedy_original
113 113
114 114
115 115 @contextmanager
116 def evaluation_level(evaluation: str):
117 ip = get_ipython()
118 evaluation_original = ip.Completer.evaluation
119 try:
120 ip.Completer.evaluation = evaluation
121 yield
122 finally:
123 ip.Completer.evaluation = evaluation_original
124
125
126 @contextmanager
116 127 def custom_matchers(matchers):
117 128 ip = get_ipython()
118 129 try:
119 130 ip.Completer.custom_matchers.extend(matchers)
120 131 yield
121 132 finally:
122 133 ip.Completer.custom_matchers.clear()
123 134
124 135
125 136 def test_protect_filename():
126 137 if sys.platform == "win32":
127 138 pairs = [
128 139 ("abc", "abc"),
129 140 (" abc", '" abc"'),
130 141 ("a bc", '"a bc"'),
131 142 ("a bc", '"a bc"'),
132 143 (" bc", '" bc"'),
133 144 ]
134 145 else:
135 146 pairs = [
136 147 ("abc", "abc"),
137 148 (" abc", r"\ abc"),
138 149 ("a bc", r"a\ bc"),
139 150 ("a bc", r"a\ \ bc"),
140 151 (" bc", r"\ \ bc"),
141 152 # On posix, we also protect parens and other special characters.
142 153 ("a(bc", r"a\(bc"),
143 154 ("a)bc", r"a\)bc"),
144 155 ("a( )bc", r"a\(\ \)bc"),
145 156 ("a[1]bc", r"a\[1\]bc"),
146 157 ("a{1}bc", r"a\{1\}bc"),
147 158 ("a#bc", r"a\#bc"),
148 159 ("a?bc", r"a\?bc"),
149 160 ("a=bc", r"a\=bc"),
150 161 ("a\\bc", r"a\\bc"),
151 162 ("a|bc", r"a\|bc"),
152 163 ("a;bc", r"a\;bc"),
153 164 ("a:bc", r"a\:bc"),
154 165 ("a'bc", r"a\'bc"),
155 166 ("a*bc", r"a\*bc"),
156 167 ('a"bc', r"a\"bc"),
157 168 ("a^bc", r"a\^bc"),
158 169 ("a&bc", r"a\&bc"),
159 170 ]
160 171 # run the actual tests
161 172 for s1, s2 in pairs:
162 173 s1p = completer.protect_filename(s1)
163 174 assert s1p == s2
164 175
165 176
166 177 def check_line_split(splitter, test_specs):
167 178 for part1, part2, split in test_specs:
168 179 cursor_pos = len(part1)
169 180 line = part1 + part2
170 181 out = splitter.split_line(line, cursor_pos)
171 182 assert out == split
172 183
173 184
174 185 def test_line_split():
175 186 """Basic line splitter test with default specs."""
176 187 sp = completer.CompletionSplitter()
177 188 # The format of the test specs is: part1, part2, expected answer. Parts 1
178 189 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
179 190 # was at the end of part1. So an empty part2 represents someone hitting
180 191 # tab at the end of the line, the most common case.
181 192 t = [
182 193 ("run some/scrip", "", "some/scrip"),
183 194 ("run scripts/er", "ror.py foo", "scripts/er"),
184 195 ("echo $HOM", "", "HOM"),
185 196 ("print sys.pa", "", "sys.pa"),
186 197 ("print(sys.pa", "", "sys.pa"),
187 198 ("execfile('scripts/er", "", "scripts/er"),
188 199 ("a[x.", "", "x."),
189 200 ("a[x.", "y", "x."),
190 201 ('cd "some_file/', "", "some_file/"),
191 202 ]
192 203 check_line_split(sp, t)
193 204 # Ensure splitting works OK with unicode by re-running the tests with
194 205 # all inputs turned into unicode
195 206 check_line_split(sp, [map(str, p) for p in t])
196 207
197 208
198 209 class NamedInstanceClass:
199 210 instances = {}
200 211
201 212 def __init__(self, name):
202 213 self.instances[name] = self
203 214
204 215 @classmethod
205 216 def _ipython_key_completions_(cls):
206 217 return cls.instances.keys()
207 218
208 219
209 220 class KeyCompletable:
210 221 def __init__(self, things=()):
211 222 self.things = things
212 223
213 224 def _ipython_key_completions_(self):
214 225 return list(self.things)
215 226
216 227
217 228 class TestCompleter(unittest.TestCase):
218 229 def setUp(self):
219 230 """
220 231 We want to silence all PendingDeprecationWarning when testing the completer
221 232 """
222 233 self._assertwarns = self.assertWarns(PendingDeprecationWarning)
223 234 self._assertwarns.__enter__()
224 235
225 236 def tearDown(self):
226 237 try:
227 238 self._assertwarns.__exit__(None, None, None)
228 239 except AssertionError:
229 240 pass
230 241
231 242 def test_custom_completion_error(self):
232 243 """Test that errors from custom attribute completers are silenced."""
233 244 ip = get_ipython()
234 245
235 246 class A:
236 247 pass
237 248
238 249 ip.user_ns["x"] = A()
239 250
240 251 @complete_object.register(A)
241 252 def complete_A(a, existing_completions):
242 253 raise TypeError("this should be silenced")
243 254
244 255 ip.complete("x.")
245 256
246 257 def test_custom_completion_ordering(self):
247 258 """Test that completions from custom completers keep their ordering."""
248 259 ip = get_ipython()
249 260
250 261 _, matches = ip.complete('in')
251 262 assert matches.index('input') < matches.index('int')
252 263
253 264 def complete_example(a):
254 265 return ['example2', 'example1']
255 266
256 267 ip.Completer.custom_completers.add_re('ex*', complete_example)
257 268 _, matches = ip.complete('ex')
258 269 assert matches.index('example2') < matches.index('example1')
259 270
260 271 def test_unicode_completions(self):
261 272 ip = get_ipython()
262 273 # Some strings that trigger different types of completion. Check them both
263 274 # in str and unicode forms
264 275 s = ["ru", "%ru", "cd /", "floa", "float(x)/"]
265 276 for t in s + list(map(str, s)):
266 277 # We don't need to check exact completion values (they may change
267 278 # depending on the state of the namespace, but at least no exceptions
268 279 # should be thrown and the return value should be a pair of text, list
269 280 # values.
270 281 text, matches = ip.complete(t)
271 282 self.assertIsInstance(text, str)
272 283 self.assertIsInstance(matches, list)
273 284
274 285 def test_latex_completions(self):
275 286 from IPython.core.latex_symbols import latex_symbols
276 287 import random
277 288
278 289 ip = get_ipython()
279 290 # Test some random unicode symbols
280 291 keys = random.sample(sorted(latex_symbols), 10)
281 292 for k in keys:
282 293 text, matches = ip.complete(k)
283 294 self.assertEqual(text, k)
284 295 self.assertEqual(matches, [latex_symbols[k]])
285 296 # Test a more complex line
286 297 text, matches = ip.complete("print(\\alpha")
287 298 self.assertEqual(text, "\\alpha")
288 299 self.assertEqual(matches[0], latex_symbols["\\alpha"])
289 300 # Test multiple matching latex symbols
290 301 text, matches = ip.complete("\\al")
291 302 self.assertIn("\\alpha", matches)
292 303 self.assertIn("\\aleph", matches)
293 304
294 305 def test_latex_no_results(self):
295 306 """
296 307 forward latex should really return nothing in either field if nothing is found.
297 308 """
298 309 ip = get_ipython()
299 310 text, matches = ip.Completer.latex_matches("\\really_i_should_match_nothing")
300 311 self.assertEqual(text, "")
301 312 self.assertEqual(matches, ())
302 313
303 314 def test_back_latex_completion(self):
304 315 ip = get_ipython()
305 316
306 317 # do not return more than 1 matches for \beta, only the latex one.
307 318 name, matches = ip.complete("\\Ξ²")
308 319 self.assertEqual(matches, ["\\beta"])
309 320
310 321 def test_back_unicode_completion(self):
311 322 ip = get_ipython()
312 323
313 324 name, matches = ip.complete("\\β…€")
314 325 self.assertEqual(matches, ["\\ROMAN NUMERAL FIVE"])
315 326
316 327 def test_forward_unicode_completion(self):
317 328 ip = get_ipython()
318 329
319 330 name, matches = ip.complete("\\ROMAN NUMERAL FIVE")
320 331 self.assertEqual(matches, ["β…€"]) # This is not a V
321 332 self.assertEqual(matches, ["\u2164"]) # same as above but explicit.
322 333
323 334 def test_delim_setting(self):
324 335 sp = completer.CompletionSplitter()
325 336 sp.delims = " "
326 337 self.assertEqual(sp.delims, " ")
327 338 self.assertEqual(sp._delim_expr, r"[\ ]")
328 339
329 340 def test_spaces(self):
330 341 """Test with only spaces as split chars."""
331 342 sp = completer.CompletionSplitter()
332 343 sp.delims = " "
333 344 t = [("foo", "", "foo"), ("run foo", "", "foo"), ("run foo", "bar", "foo")]
334 345 check_line_split(sp, t)
335 346
336 347 def test_has_open_quotes1(self):
337 348 for s in ["'", "'''", "'hi' '"]:
338 349 self.assertEqual(completer.has_open_quotes(s), "'")
339 350
340 351 def test_has_open_quotes2(self):
341 352 for s in ['"', '"""', '"hi" "']:
342 353 self.assertEqual(completer.has_open_quotes(s), '"')
343 354
344 355 def test_has_open_quotes3(self):
345 356 for s in ["''", "''' '''", "'hi' 'ipython'"]:
346 357 self.assertFalse(completer.has_open_quotes(s))
347 358
348 359 def test_has_open_quotes4(self):
349 360 for s in ['""', '""" """', '"hi" "ipython"']:
350 361 self.assertFalse(completer.has_open_quotes(s))
351 362
352 363 @pytest.mark.xfail(
353 364 sys.platform == "win32", reason="abspath completions fail on Windows"
354 365 )
355 366 def test_abspath_file_completions(self):
356 367 ip = get_ipython()
357 368 with TemporaryDirectory() as tmpdir:
358 369 prefix = os.path.join(tmpdir, "foo")
359 370 suffixes = ["1", "2"]
360 371 names = [prefix + s for s in suffixes]
361 372 for n in names:
362 373 open(n, "w", encoding="utf-8").close()
363 374
364 375 # Check simple completion
365 376 c = ip.complete(prefix)[1]
366 377 self.assertEqual(c, names)
367 378
368 379 # Now check with a function call
369 380 cmd = 'a = f("%s' % prefix
370 381 c = ip.complete(prefix, cmd)[1]
371 382 comp = [prefix + s for s in suffixes]
372 383 self.assertEqual(c, comp)
373 384
374 385 def test_local_file_completions(self):
375 386 ip = get_ipython()
376 387 with TemporaryWorkingDirectory():
377 388 prefix = "./foo"
378 389 suffixes = ["1", "2"]
379 390 names = [prefix + s for s in suffixes]
380 391 for n in names:
381 392 open(n, "w", encoding="utf-8").close()
382 393
383 394 # Check simple completion
384 395 c = ip.complete(prefix)[1]
385 396 self.assertEqual(c, names)
386 397
387 398 # Now check with a function call
388 399 cmd = 'a = f("%s' % prefix
389 400 c = ip.complete(prefix, cmd)[1]
390 401 comp = {prefix + s for s in suffixes}
391 402 self.assertTrue(comp.issubset(set(c)))
392 403
393 404 def test_quoted_file_completions(self):
394 405 ip = get_ipython()
395 406
396 407 def _(text):
397 408 return ip.Completer._complete(
398 409 cursor_line=0, cursor_pos=len(text), full_text=text
399 410 )["IPCompleter.file_matcher"]["completions"]
400 411
401 412 with TemporaryWorkingDirectory():
402 413 name = "foo'bar"
403 414 open(name, "w", encoding="utf-8").close()
404 415
405 416 # Don't escape Windows
406 417 escaped = name if sys.platform == "win32" else "foo\\'bar"
407 418
408 419 # Single quote matches embedded single quote
409 420 c = _("open('foo")[0]
410 421 self.assertEqual(c.text, escaped)
411 422
412 423 # Double quote requires no escape
413 424 c = _('open("foo')[0]
414 425 self.assertEqual(c.text, name)
415 426
416 427 # No quote requires an escape
417 428 c = _("%ls foo")[0]
418 429 self.assertEqual(c.text, escaped)
419 430
420 431 def test_all_completions_dups(self):
421 432 """
422 433 Make sure the output of `IPCompleter.all_completions` does not have
423 434 duplicated prefixes.
424 435 """
425 436 ip = get_ipython()
426 437 c = ip.Completer
427 438 ip.ex("class TestClass():\n\ta=1\n\ta1=2")
428 439 for jedi_status in [True, False]:
429 440 with provisionalcompleter():
430 441 ip.Completer.use_jedi = jedi_status
431 442 matches = c.all_completions("TestCl")
432 443 assert matches == ["TestClass"], (jedi_status, matches)
433 444 matches = c.all_completions("TestClass.")
434 445 assert len(matches) > 2, (jedi_status, matches)
435 446 matches = c.all_completions("TestClass.a")
436 447 assert matches == ['TestClass.a', 'TestClass.a1'], jedi_status
437 448
438 449 def test_jedi(self):
439 450 """
440 451 A couple of issue we had with Jedi
441 452 """
442 453 ip = get_ipython()
443 454
444 455 def _test_complete(reason, s, comp, start=None, end=None):
445 456 l = len(s)
446 457 start = start if start is not None else l
447 458 end = end if end is not None else l
448 459 with provisionalcompleter():
449 460 ip.Completer.use_jedi = True
450 461 completions = set(ip.Completer.completions(s, l))
451 462 ip.Completer.use_jedi = False
452 463 assert Completion(start, end, comp) in completions, reason
453 464
454 465 def _test_not_complete(reason, s, comp):
455 466 l = len(s)
456 467 with provisionalcompleter():
457 468 ip.Completer.use_jedi = True
458 469 completions = set(ip.Completer.completions(s, l))
459 470 ip.Completer.use_jedi = False
460 471 assert Completion(l, l, comp) not in completions, reason
461 472
462 473 import jedi
463 474
464 475 jedi_version = tuple(int(i) for i in jedi.__version__.split(".")[:3])
465 476 if jedi_version > (0, 10):
466 477 _test_complete("jedi >0.9 should complete and not crash", "a=1;a.", "real")
467 478 _test_complete("can infer first argument", 'a=(1,"foo");a[0].', "real")
468 479 _test_complete("can infer second argument", 'a=(1,"foo");a[1].', "capitalize")
469 480 _test_complete("cover duplicate completions", "im", "import", 0, 2)
470 481
471 482 _test_not_complete("does not mix types", 'a=(1,"foo");a[0].', "capitalize")
472 483
473 484 def test_completion_have_signature(self):
474 485 """
475 486 Lets make sure jedi is capable of pulling out the signature of the function we are completing.
476 487 """
477 488 ip = get_ipython()
478 489 with provisionalcompleter():
479 490 ip.Completer.use_jedi = True
480 491 completions = ip.Completer.completions("ope", 3)
481 492 c = next(completions) # should be `open`
482 493 ip.Completer.use_jedi = False
483 494 assert "file" in c.signature, "Signature of function was not found by completer"
484 495 assert (
485 496 "encoding" in c.signature
486 497 ), "Signature of function was not found by completer"
487 498
488 499 def test_completions_have_type(self):
489 500 """
490 501 Lets make sure matchers provide completion type.
491 502 """
492 503 ip = get_ipython()
493 504 with provisionalcompleter():
494 505 ip.Completer.use_jedi = False
495 506 completions = ip.Completer.completions("%tim", 3)
496 507 c = next(completions) # should be `%time` or similar
497 508 assert c.type == "magic", "Type of magic was not assigned by completer"
498 509
499 510 @pytest.mark.xfail(reason="Known failure on jedi<=0.18.0")
500 511 def test_deduplicate_completions(self):
501 512 """
502 513 Test that completions are correctly deduplicated (even if ranges are not the same)
503 514 """
504 515 ip = get_ipython()
505 516 ip.ex(
506 517 textwrap.dedent(
507 518 """
508 519 class Z:
509 520 zoo = 1
510 521 """
511 522 )
512 523 )
513 524 with provisionalcompleter():
514 525 ip.Completer.use_jedi = True
515 526 l = list(
516 527 _deduplicate_completions("Z.z", ip.Completer.completions("Z.z", 3))
517 528 )
518 529 ip.Completer.use_jedi = False
519 530
520 531 assert len(l) == 1, "Completions (Z.z<tab>) correctly deduplicate: %s " % l
521 532 assert l[0].text == "zoo" # and not `it.accumulate`
522 533
523 534 def test_greedy_completions(self):
524 535 """
525 536 Test the capability of the Greedy completer.
526 537
527 538 Most of the tests here do not really show off the greedy completer; as proof,
528 539 each of the texts below now passes with Jedi. The greedy completer is capable of more.
529 540
530 541 See the :any:`test_dict_key_completion_contexts`
531 542
532 543 """
533 544 ip = get_ipython()
534 545 ip.ex("a=list(range(5))")
535 546 _, c = ip.complete(".", line="a[0].")
536 547 self.assertFalse(".real" in c, "Shouldn't have completed on a[0]: %s" % c)
537 548
538 549 def _(line, cursor_pos, expect, message, completion):
539 550 with greedy_completion(), provisionalcompleter():
540 551 ip.Completer.use_jedi = False
541 552 _, c = ip.complete(".", line=line, cursor_pos=cursor_pos)
542 553 self.assertIn(expect, c, message % c)
543 554
544 555 ip.Completer.use_jedi = True
545 556 with provisionalcompleter():
546 557 completions = ip.Completer.completions(line, cursor_pos)
547 558 self.assertIn(completion, completions)
548 559
549 560 with provisionalcompleter():
550 561 _(
551 562 "a[0].",
552 563 5,
553 564 "a[0].real",
554 565 "Should have completed on a[0].: %s",
555 566 Completion(5, 5, "real"),
556 567 )
557 568 _(
558 569 "a[0].r",
559 570 6,
560 571 "a[0].real",
561 572 "Should have completed on a[0].r: %s",
562 573 Completion(5, 6, "real"),
563 574 )
564 575
565 576 _(
566 577 "a[0].from_",
567 578 10,
568 579 "a[0].from_bytes",
569 580 "Should have completed on a[0].from_: %s",
570 581 Completion(5, 10, "from_bytes"),
571 582 )
572 583
    def test_omit__names(self):
        """omit__names levels: 0 shows everything, 1 hides dunder names,
        2 hides any single-underscore name as well.

        Also happens to test IPCompleter as a configurable.
        """
        ip = get_ipython()
        ip._hidden_attr = 1
        ip._x = {}
        c = ip.Completer
        ip.ex("ip=get_ipython()")
        cfg = Config()
        cfg.IPCompleter.omit__names = 0
        c.update_config(cfg)
        with provisionalcompleter():
            c.use_jedi = False
            s, matches = c.complete("ip.")
            # level 0: both dunder and underscore attributes are offered
            self.assertIn("ip.__str__", matches)
            self.assertIn("ip._hidden_attr", matches)

            # c.use_jedi = True
            # completions = set(c.completions('ip.', 3))
            # self.assertIn(Completion(3, 3, '__str__'), completions)
            # self.assertIn(Completion(3,3, "_hidden_attr"), completions)

        cfg = Config()
        cfg.IPCompleter.omit__names = 1
        c.update_config(cfg)
        with provisionalcompleter():
            c.use_jedi = False
            s, matches = c.complete("ip.")
            # level 1: dunder names are hidden
            self.assertNotIn("ip.__str__", matches)
            # self.assertIn('ip._hidden_attr', matches)

            # c.use_jedi = True
            # completions = set(c.completions('ip.', 3))
            # self.assertNotIn(Completion(3,3,'__str__'), completions)
            # self.assertIn(Completion(3,3, "_hidden_attr"), completions)

        cfg = Config()
        cfg.IPCompleter.omit__names = 2
        c.update_config(cfg)
        with provisionalcompleter():
            c.use_jedi = False
            s, matches = c.complete("ip.")
            # level 2: underscore-prefixed names are hidden too
            self.assertNotIn("ip.__str__", matches)
            self.assertNotIn("ip._hidden_attr", matches)

            # c.use_jedi = True
            # completions = set(c.completions('ip.', 3))
            # self.assertNotIn(Completion(3,3,'__str__'), completions)
            # self.assertNotIn(Completion(3,3, "_hidden_attr"), completions)

        # completing *on* an explicitly typed _name still works
        with provisionalcompleter():
            c.use_jedi = False
            s, matches = c.complete("ip._x.")
            self.assertIn("ip._x.keys", matches)

            # c.use_jedi = True
            # completions = set(c.completions('ip._x.', 6))
            # self.assertIn(Completion(6,6, "keys"), completions)

        del ip._hidden_attr
        del ip._x
633 644
634 645 def test_limit_to__all__False_ok(self):
635 646 """
636 647 Limit to all is deprecated, once we remove it this test can go away.
637 648 """
638 649 ip = get_ipython()
639 650 c = ip.Completer
640 651 c.use_jedi = False
641 652 ip.ex("class D: x=24")
642 653 ip.ex("d=D()")
643 654 cfg = Config()
644 655 cfg.IPCompleter.limit_to__all__ = False
645 656 c.update_config(cfg)
646 657 s, matches = c.complete("d.")
647 658 self.assertIn("d.x", matches)
648 659
649 660 def test_get__all__entries_ok(self):
650 661 class A:
651 662 __all__ = ["x", 1]
652 663
653 664 words = completer.get__all__entries(A())
654 665 self.assertEqual(words, ["x"])
655 666
656 667 def test_get__all__entries_no__all__ok(self):
657 668 class A:
658 669 pass
659 670
660 671 words = completer.get__all__entries(A())
661 672 self.assertEqual(words, [])
662 673
663 674 def test_func_kw_completions(self):
664 675 ip = get_ipython()
665 676 c = ip.Completer
666 677 c.use_jedi = False
667 678 ip.ex("def myfunc(a=1,b=2): return a+b")
668 679 s, matches = c.complete(None, "myfunc(1,b")
669 680 self.assertIn("b=", matches)
670 681 # Simulate completing with cursor right after b (pos==10):
671 682 s, matches = c.complete(None, "myfunc(1,b)", 10)
672 683 self.assertIn("b=", matches)
673 684 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
674 685 self.assertIn("b=", matches)
675 686 # builtin function
676 687 s, matches = c.complete(None, "min(k, k")
677 688 self.assertIn("key=", matches)
678 689
679 690 def test_default_arguments_from_docstring(self):
680 691 ip = get_ipython()
681 692 c = ip.Completer
682 693 kwd = c._default_arguments_from_docstring("min(iterable[, key=func]) -> value")
683 694 self.assertEqual(kwd, ["key"])
684 695 # with cython type etc
685 696 kwd = c._default_arguments_from_docstring(
686 697 "Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
687 698 )
688 699 self.assertEqual(kwd, ["ncall", "resume", "nsplit"])
689 700 # white spaces
690 701 kwd = c._default_arguments_from_docstring(
691 702 "\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
692 703 )
693 704 self.assertEqual(kwd, ["ncall", "resume", "nsplit"])
694 705
695 706 def test_line_magics(self):
696 707 ip = get_ipython()
697 708 c = ip.Completer
698 709 s, matches = c.complete(None, "lsmag")
699 710 self.assertIn("%lsmagic", matches)
700 711 s, matches = c.complete(None, "%lsmag")
701 712 self.assertIn("%lsmagic", matches)
702 713
703 714 def test_cell_magics(self):
704 715 from IPython.core.magic import register_cell_magic
705 716
706 717 @register_cell_magic
707 718 def _foo_cellm(line, cell):
708 719 pass
709 720
710 721 ip = get_ipython()
711 722 c = ip.Completer
712 723
713 724 s, matches = c.complete(None, "_foo_ce")
714 725 self.assertIn("%%_foo_cellm", matches)
715 726 s, matches = c.complete(None, "%%_foo_ce")
716 727 self.assertIn("%%_foo_cellm", matches)
717 728
    def test_line_cell_magics(self):
        """A combined line+cell magic is offered in both % and %% forms,
        unless the user already typed %% explicitly."""
        from IPython.core.magic import register_line_cell_magic

        @register_line_cell_magic
        def _bar_cellm(line, cell):
            pass

        ip = get_ipython()
        c = ip.Completer

        # The policy here is trickier, see comments in completion code. The
        # returned values depend on whether the user passes %% or not explicitly,
        # and this will show a difference if the same name is both a line and cell
        # magic.
        s, matches = c.complete(None, "_bar_ce")
        self.assertIn("%_bar_cellm", matches)
        self.assertIn("%%_bar_cellm", matches)
        s, matches = c.complete(None, "%_bar_ce")
        self.assertIn("%_bar_cellm", matches)
        self.assertIn("%%_bar_cellm", matches)
        s, matches = c.complete(None, "%%_bar_ce")
        # explicit %% excludes the line-magic form
        self.assertNotIn("%_bar_cellm", matches)
        self.assertIn("%%_bar_cellm", matches)
741 752
742 753 def test_magic_completion_order(self):
743 754 ip = get_ipython()
744 755 c = ip.Completer
745 756
746 757 # Test ordering of line and cell magics.
747 758 text, matches = c.complete("timeit")
748 759 self.assertEqual(matches, ["%timeit", "%%timeit"])
749 760
    def test_magic_completion_shadowing(self):
        """A user-defined name shadows the magic of the same name."""
        ip = get_ipython()
        c = ip.Completer
        c.use_jedi = False

        # Before importing matplotlib, %matplotlib magic should be the only option.
        text, matches = c.complete("mat")
        self.assertEqual(matches, ["%matplotlib"])

        # The newly introduced name should shadow the magic.
        ip.run_cell("matplotlib = 1")
        text, matches = c.complete("mat")
        self.assertEqual(matches, ["matplotlib"])

        # After removing matplotlib from namespace, the magic should again be
        # the only option.
        del ip.user_ns["matplotlib"]
        text, matches = c.complete("mat")
        self.assertEqual(matches, ["%matplotlib"])
769 780
    def test_magic_completion_shadowing_explicit(self):
        """
        If the user tries to complete a shadowed magic, an explicit % start
        should still return the completions.
        """
        ip = get_ipython()
        c = ip.Completer

        # Before importing matplotlib, %matplotlib magic should be the only option.
        text, matches = c.complete("%mat")
        self.assertEqual(matches, ["%matplotlib"])

        ip.run_cell("matplotlib = 1")

        # Even though the magic is now shadowed by the user variable, the
        # explicit % prefix still completes it as the only option.
        text, matches = c.complete("%mat")
        self.assertEqual(matches, ["%matplotlib"])
788 799
789 800 def test_magic_config(self):
790 801 ip = get_ipython()
791 802 c = ip.Completer
792 803
793 804 s, matches = c.complete(None, "conf")
794 805 self.assertIn("%config", matches)
795 806 s, matches = c.complete(None, "conf")
796 807 self.assertNotIn("AliasManager", matches)
797 808 s, matches = c.complete(None, "config ")
798 809 self.assertIn("AliasManager", matches)
799 810 s, matches = c.complete(None, "%config ")
800 811 self.assertIn("AliasManager", matches)
801 812 s, matches = c.complete(None, "config Ali")
802 813 self.assertListEqual(["AliasManager"], matches)
803 814 s, matches = c.complete(None, "%config Ali")
804 815 self.assertListEqual(["AliasManager"], matches)
805 816 s, matches = c.complete(None, "config AliasManager")
806 817 self.assertListEqual(["AliasManager"], matches)
807 818 s, matches = c.complete(None, "%config AliasManager")
808 819 self.assertListEqual(["AliasManager"], matches)
809 820 s, matches = c.complete(None, "config AliasManager.")
810 821 self.assertIn("AliasManager.default_aliases", matches)
811 822 s, matches = c.complete(None, "%config AliasManager.")
812 823 self.assertIn("AliasManager.default_aliases", matches)
813 824 s, matches = c.complete(None, "config AliasManager.de")
814 825 self.assertListEqual(["AliasManager.default_aliases"], matches)
815 826 s, matches = c.complete(None, "config AliasManager.de")
816 827 self.assertListEqual(["AliasManager.default_aliases"], matches)
817 828
    def test_magic_color(self):
        """Completion for the %colors magic and its argument values."""
        ip = get_ipython()
        c = ip.Completer

        s, matches = c.complete(None, "colo")
        self.assertIn("%colors", matches)
        s, matches = c.complete(None, "colo")
        self.assertNotIn("NoColor", matches)
        # arguments only appear once the magic name is complete + a space
        s, matches = c.complete(None, "%colors")  # No trailing space
        self.assertNotIn("NoColor", matches)
        s, matches = c.complete(None, "colors ")
        self.assertIn("NoColor", matches)
        s, matches = c.complete(None, "%colors ")
        self.assertIn("NoColor", matches)
        s, matches = c.complete(None, "colors NoCo")
        self.assertListEqual(["NoColor"], matches)
        s, matches = c.complete(None, "%colors NoCo")
        self.assertListEqual(["NoColor"], matches)
836 847
837 848 def test_match_dict_keys(self):
838 849 """
839 850 Test that match_dict_keys works on a couple of use case does return what
840 851 expected, and does not crash
841 852 """
842 853 delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
843 854
844 855 keys = ["foo", b"far"]
845 856 assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2, ["far"])
846 857 assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2, ["far"])
847 858 assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2, ["far"])
848 859 assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2, ["far"])
849 860
850 861 assert match_dict_keys(keys, "'", delims=delims) == ("'", 1, ["foo"])
851 862 assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1, ["foo"])
852 863 assert match_dict_keys(keys, '"', delims=delims) == ('"', 1, ["foo"])
853 864 assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1, ["foo"])
854 865
855 match_dict_keys
856
    def test_match_dict_keys_tuple(self):
        """
        Test that match_dict_keys called with extra prefix works on a couple of use case,
        does return what expected, and does not crash.
        """
        delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"

        keys = [("foo", "bar"), ("foo", "oof"), ("foo", b"bar"), ('other', 'test')]

        # Completion on first key == "foo"
        assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["bar", "oof"])
        assert match_dict_keys(keys, "\"", delims=delims, extra_prefix=("foo",)) == ("\"", 1, ["bar", "oof"])
        assert match_dict_keys(keys, "'o", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["oof"])
        assert match_dict_keys(keys, "\"o", delims=delims, extra_prefix=("foo",)) == ("\"", 1, ["oof"])
        assert match_dict_keys(keys, "b'", delims=delims, extra_prefix=("foo",)) == ("'", 2, ["bar"])
        assert match_dict_keys(keys, "b\"", delims=delims, extra_prefix=("foo",)) == ("\"", 2, ["bar"])
        assert match_dict_keys(keys, "b'b", delims=delims, extra_prefix=("foo",)) == ("'", 2, ["bar"])
        assert match_dict_keys(keys, "b\"b", delims=delims, extra_prefix=("foo",)) == ("\"", 2, ["bar"])

        # No Completion
        assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("no_foo",)) == ("'", 1, [])
        assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("fo",)) == ("'", 1, [])

        keys = [('foo1', 'foo2', 'foo3', 'foo4'), ('foo1', 'foo2', 'bar', 'foo4')]
        # "foo2" appears twice because both tuples match at that position
        assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1',)) == ("'", 1, ["foo2", "foo2"])
        assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2')) == ("'", 1, ["foo3"])
        assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3')) == ("'", 1, ["foo4"])
        assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3', 'foo4')) == ("'", 1, [])

        # non-string keys: matches come back as strings, and a string prefix
        # ("3333") does not match the int key 3333
        keys = [("foo", 1111), ("foo", 2222), (3333, "bar"), (3333, 'test')]
        assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["1111", "2222"])
        assert match_dict_keys(keys, "'", delims=delims, extra_prefix=(3333,)) == ("'", 1, ["bar", "test"])
        assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("3333",)) == ("'", 1, [])
899
    def test_dict_key_completion_string(self):
        """Test dictionary key completion for string keys.

        Covers quoting styles, escaping, trailing context sensitivity, and
        tuple-of-string keys completed position by position.
        """
        ip = get_ipython()
        complete = ip.Completer.complete

        ip.user_ns["d"] = {"abc": None}

        # check completion at different stages
        _, matches = complete(line_buffer="d[")
        self.assertIn("'abc'", matches)
        self.assertNotIn("'abc']", matches)

        _, matches = complete(line_buffer="d['")
        self.assertIn("abc", matches)
        self.assertNotIn("abc']", matches)

        _, matches = complete(line_buffer="d['a")
        self.assertIn("abc", matches)
        self.assertNotIn("abc']", matches)

        # check use of different quoting
        _, matches = complete(line_buffer='d["')
        self.assertIn("abc", matches)
        self.assertNotIn('abc"]', matches)

        _, matches = complete(line_buffer='d["a')
        self.assertIn("abc", matches)
        self.assertNotIn('abc"]', matches)

        # check sensitivity to following context
        _, matches = complete(line_buffer="d[]", cursor_pos=2)
        self.assertIn("'abc'", matches)

        _, matches = complete(line_buffer="d['']", cursor_pos=3)
        self.assertIn("abc", matches)
        self.assertNotIn("abc'", matches)
        self.assertNotIn("abc']", matches)

        # check multiple solutions are correctly returned and that noise is not
        ip.user_ns["d"] = {
            "abc": None,
            "abd": None,
            "bad": None,
            object(): None,
            5: None,
            ("abe", None): None,
            (None, "abf"): None
        }

        _, matches = complete(line_buffer="d['a")
        self.assertIn("abc", matches)
        self.assertIn("abd", matches)
        self.assertNotIn("bad", matches)
        self.assertNotIn("abe", matches)
        self.assertNotIn("abf", matches)
        assert not any(m.endswith(("]", '"', "'")) for m in matches), matches

        # check escaping and whitespace
        ip.user_ns["d"] = {"a\nb": None, "a'b": None, 'a"b': None, "a word": None}
        _, matches = complete(line_buffer="d['a")
        self.assertIn("a\\nb", matches)
        self.assertIn("a\\'b", matches)
        self.assertIn('a"b', matches)
        self.assertIn("a word", matches)
        assert not any(m.endswith(("]", '"', "'")) for m in matches), matches

        # - can complete on non-initial word of the string
        _, matches = complete(line_buffer="d['a w")
        self.assertIn("word", matches)

        # - understands quote escaping
        _, matches = complete(line_buffer="d['a\\'")
        self.assertIn("b", matches)

        # - default quoting should work like repr
        _, matches = complete(line_buffer="d[")
        self.assertIn('"a\'b"', matches)

        # - when opening quote with ", possible to match with unescaped apostrophe
        _, matches = complete(line_buffer="d[\"a'")
        self.assertIn("b", matches)

        # need to not split at delims that readline won't split at
        if "-" not in ip.Completer.splitter.delims:
            ip.user_ns["d"] = {"before-after": None}
            _, matches = complete(line_buffer="d['before-af")
            self.assertIn("before-after", matches)

        # check completion on tuple-of-string keys at different stage - on first key
        ip.user_ns["d"] = {('foo', 'bar'): None}
        _, matches = complete(line_buffer="d[")
        self.assertIn("'foo'", matches)
        self.assertNotIn("'foo']", matches)
        self.assertNotIn("'bar'", matches)
        self.assertNotIn("foo", matches)
        self.assertNotIn("bar", matches)

        # - match the prefix
        _, matches = complete(line_buffer="d['f")
        self.assertIn("foo", matches)
        self.assertNotIn("foo']", matches)
        self.assertNotIn('foo"]', matches)
        _, matches = complete(line_buffer="d['foo")
        self.assertIn("foo", matches)

        # - can complete on second key
        _, matches = complete(line_buffer="d['foo', ")
        self.assertIn("'bar'", matches)
        _, matches = complete(line_buffer="d['foo', 'b")
        self.assertIn("bar", matches)
        self.assertNotIn("foo", matches)

        # - does not propose missing keys
        _, matches = complete(line_buffer="d['foo', 'f")
        self.assertNotIn("bar", matches)
        self.assertNotIn("foo", matches)

        # check sensitivity to following context
        _, matches = complete(line_buffer="d['foo',]", cursor_pos=8)
        self.assertIn("'bar'", matches)
        self.assertNotIn("bar", matches)
        self.assertNotIn("'foo'", matches)
        self.assertNotIn("foo", matches)

        _, matches = complete(line_buffer="d['']", cursor_pos=3)
        self.assertIn("foo", matches)
        assert not any(m.endswith(("]", '"', "'")) for m in matches), matches

        _, matches = complete(line_buffer='d[""]', cursor_pos=3)
        self.assertIn("foo", matches)
        assert not any(m.endswith(("]", '"', "'")) for m in matches), matches

        _, matches = complete(line_buffer='d["foo","]', cursor_pos=9)
        self.assertIn("bar", matches)
        assert not any(m.endswith(("]", '"', "'")) for m in matches), matches

        _, matches = complete(line_buffer='d["foo",]', cursor_pos=8)
        self.assertIn("'bar'", matches)
        self.assertNotIn("bar", matches)

        # Can complete with longer tuple keys
        ip.user_ns["d"] = {('foo', 'bar', 'foobar'): None}

        # - can complete second key
        _, matches = complete(line_buffer="d['foo', 'b")
        self.assertIn("bar", matches)
        self.assertNotIn("foo", matches)
        self.assertNotIn("foobar", matches)

        # - can complete third key
        _, matches = complete(line_buffer="d['foo', 'bar', 'fo")
        self.assertIn("foobar", matches)
        self.assertNotIn("foo", matches)
        self.assertNotIn("bar", matches)
1040 1054
    def test_dict_key_completion_contexts(self):
        """Test expression contexts in which dict key completion occurs"""
        ip = get_ipython()
        complete = ip.Completer.complete
        d = {"abc": None}
        ip.user_ns["d"] = d

        class C:
            data = d

        ip.user_ns["C"] = C
        ip.user_ns["get"] = lambda: d
        ip.user_ns["nested"] = {'x': d}

        def assert_no_completion(**kwargs):
            _, matches = complete(**kwargs)
            self.assertNotIn("abc", matches)
            self.assertNotIn("abc'", matches)
            self.assertNotIn("abc']", matches)
            self.assertNotIn("'abc'", matches)
            self.assertNotIn("'abc']", matches)

        def assert_completion(**kwargs):
            _, matches = complete(**kwargs)
            self.assertIn("'abc'", matches)
            self.assertNotIn("'abc']", matches)

        # no completion after string closed, even if reopened
        assert_no_completion(line_buffer="d['a'")
        assert_no_completion(line_buffer='d["a"')
        assert_no_completion(line_buffer="d['a' + ")
        assert_no_completion(line_buffer="d['a' + '")

        # completion in non-trivial expressions
        assert_completion(line_buffer="+ d[")
        assert_completion(line_buffer="(d[")
        assert_completion(line_buffer="C.data[")

        # nested dict completion
        assert_completion(line_buffer="nested['x'][")

        # under the 'minimal' guarded-evaluation policy the nested item access
        # is not evaluated, so no keys are offered
        with evaluation_level('minimal'):
            with pytest.raises(AssertionError):
                assert_completion(line_buffer="nested['x'][")

        # greedy flag
        # NOTE: assert_completion is redefined here — greedy completion
        # returns the full expression including the call, not just the key
        def assert_completion(**kwargs):
            _, matches = complete(**kwargs)
            self.assertIn("get()['abc']", matches)

        assert_no_completion(line_buffer="get()[")
        with greedy_completion():
            assert_completion(line_buffer="get()[")
            assert_completion(line_buffer="get()['")
            assert_completion(line_buffer="get()['a")
            assert_completion(line_buffer="get()['ab")
            assert_completion(line_buffer="get()['abc")
1090 1112
    def test_dict_key_completion_bytes(self):
        """Test handling of bytes in dict key completion"""
        ip = get_ipython()
        complete = ip.Completer.complete

        ip.user_ns["d"] = {"abc": None, b"abd": None}

        _, matches = complete(line_buffer="d[")
        self.assertIn("'abc'", matches)
        self.assertIn("b'abd'", matches)

        # discriminating between str and bytes keys by the typed quote prefix
        # is not currently implemented; kept as documentation of intent
        if False:  # not currently implemented
            _, matches = complete(line_buffer="d[b")
            self.assertIn("b'abd'", matches)
            self.assertNotIn("b'abc'", matches)

            _, matches = complete(line_buffer="d[b'")
            self.assertIn("abd", matches)
            self.assertNotIn("abc", matches)

            _, matches = complete(line_buffer="d[B'")
            self.assertIn("abd", matches)
            self.assertNotIn("abc", matches)

            _, matches = complete(line_buffer="d['")
            self.assertIn("abc", matches)
            self.assertNotIn("abd", matches)
1118 1140
    def test_dict_key_completion_unicode_py3(self):
        """Test handling of unicode in dict key completion"""
        ip = get_ipython()
        complete = ip.Completer.complete

        ip.user_ns["d"] = {"a\u05d0": None}

        # query using escape
        if sys.platform != "win32":
            # Known failure on Windows
            _, matches = complete(line_buffer="d['a\\u05d0")
            self.assertIn("u05d0", matches)  # tokenized after \\

        # query using character
        _, matches = complete(line_buffer="d['a\u05d0")
        self.assertIn("a\u05d0", matches)

        with greedy_completion():
            # greedy mode returns the whole indexing expression as the match
            # query using escape
            _, matches = complete(line_buffer="d['a\\u05d0")
            self.assertIn("d['a\\u05d0']", matches)  # tokenized after \\

            # query using character
            _, matches = complete(line_buffer="d['a\u05d0")
            self.assertIn("d['a\u05d0']", matches)
1144 1166
    @dec.skip_without("numpy")
    def test_struct_array_key_completion(self):
        """Test dict key completion applies to numpy struct arrays"""
        import numpy

        ip = get_ipython()
        complete = ip.Completer.complete
        ip.user_ns["d"] = numpy.array([], dtype=[("hello", "f"), ("world", "f")])
        _, matches = complete(line_buffer="d['")
        self.assertIn("hello", matches)
        self.assertIn("world", matches)
        # complete on the numpy struct itself
        dt = numpy.dtype(
            [("my_head", [("my_dt", ">u4"), ("my_df", ">u4")]), ("my_data", ">f4", 5)]
        )
        x = numpy.zeros(2, dtype=dt)
        ip.user_ns["d"] = x[1]
        _, matches = complete(line_buffer="d['")
        self.assertIn("my_head", matches)
        self.assertIn("my_data", matches)

        def completes_on_nested():
            # nested completion requires evaluating d[1]['my_head'] first
            ip.user_ns["d"] = numpy.zeros(2, dtype=dt)
            _, matches = complete(line_buffer="d[1]['my_head']['")
            self.assertTrue(any(["my_dt" in m for m in matches]))
            self.assertTrue(any(["my_df" in m for m in matches]))
        # complete on a nested level
        with greedy_completion():
            completes_on_nested()

        # NOTE(review): 'limitted' is presumably the policy name used by the
        # guarded-evaluation implementation — confirm spelling against it
        with evaluation_level('limitted'):
            completes_on_nested()

        # the 'minimal' policy forbids the evaluation, so completion fails
        with evaluation_level('minimal'):
            with pytest.raises(AssertionError):
                completes_on_nested()
1171 1202
    @dec.skip_without("pandas")
    def test_dataframe_key_completion(self):
        """Test dict key completion applies to pandas DataFrames"""
        import pandas

        ip = get_ipython()
        complete = ip.Completer.complete
        ip.user_ns["d"] = pandas.DataFrame({"hello": [1], "world": [2]})
        _, matches = complete(line_buffer="d['")
        self.assertIn("hello", matches)
        self.assertIn("world", matches)
        # column completion also works through .loc with various row slices
        _, matches = complete(line_buffer="d.loc[:, '")
        self.assertIn("hello", matches)
        self.assertIn("world", matches)
        _, matches = complete(line_buffer="d.loc[1:, '")
        self.assertIn("hello", matches)
        _, matches = complete(line_buffer="d.loc[1:1, '")
        self.assertIn("hello", matches)
        _, matches = complete(line_buffer="d.loc[1:1:-1, '")
        self.assertIn("hello", matches)
        _, matches = complete(line_buffer="d.loc[::, '")
        self.assertIn("hello", matches)
1183 1225
1184 1226 def test_dict_key_completion_invalids(self):
1185 1227 """Smoke test cases dict key completion can't handle"""
1186 1228 ip = get_ipython()
1187 1229 complete = ip.Completer.complete
1188 1230
1189 1231 ip.user_ns["no_getitem"] = None
1190 1232 ip.user_ns["no_keys"] = []
1191 1233 ip.user_ns["cant_call_keys"] = dict
1192 1234 ip.user_ns["empty"] = {}
1193 1235 ip.user_ns["d"] = {"abc": 5}
1194 1236
1195 1237 _, matches = complete(line_buffer="no_getitem['")
1196 1238 _, matches = complete(line_buffer="no_keys['")
1197 1239 _, matches = complete(line_buffer="cant_call_keys['")
1198 1240 _, matches = complete(line_buffer="empty['")
1199 1241 _, matches = complete(line_buffer="name_error['")
1200 1242 _, matches = complete(line_buffer="d['\\") # incomplete escape
1201 1243
1202 1244 def test_object_key_completion(self):
1203 1245 ip = get_ipython()
1204 1246 ip.user_ns["key_completable"] = KeyCompletable(["qwerty", "qwick"])
1205 1247
1206 1248 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
1207 1249 self.assertIn("qwerty", matches)
1208 1250 self.assertIn("qwick", matches)
1209 1251
1210 1252 def test_class_key_completion(self):
1211 1253 ip = get_ipython()
1212 1254 NamedInstanceClass("qwerty")
1213 1255 NamedInstanceClass("qwick")
1214 1256 ip.user_ns["named_instance_class"] = NamedInstanceClass
1215 1257
1216 1258 _, matches = ip.Completer.complete(line_buffer="named_instance_class['qw")
1217 1259 self.assertIn("qwerty", matches)
1218 1260 self.assertIn("qwick", matches)
1219 1261
1220 1262 def test_tryimport(self):
1221 1263 """
1222 1264 Test that try-import don't crash on trailing dot, and import modules before
1223 1265 """
1224 1266 from IPython.core.completerlib import try_import
1225 1267
1226 1268 assert try_import("IPython.")
1227 1269
1228 1270 def test_aimport_module_completer(self):
1229 1271 ip = get_ipython()
1230 1272 _, matches = ip.complete("i", "%aimport i")
1231 1273 self.assertIn("io", matches)
1232 1274 self.assertNotIn("int", matches)
1233 1275
1234 1276 def test_nested_import_module_completer(self):
1235 1277 ip = get_ipython()
1236 1278 _, matches = ip.complete(None, "import IPython.co", 17)
1237 1279 self.assertIn("IPython.core", matches)
1238 1280 self.assertNotIn("import IPython.core", matches)
1239 1281 self.assertNotIn("IPython.display", matches)
1240 1282
1241 1283 def test_import_module_completer(self):
1242 1284 ip = get_ipython()
1243 1285 _, matches = ip.complete("i", "import i")
1244 1286 self.assertIn("io", matches)
1245 1287 self.assertNotIn("int", matches)
1246 1288
1247 1289 def test_from_module_completer(self):
1248 1290 ip = get_ipython()
1249 1291 _, matches = ip.complete("B", "from io import B", 16)
1250 1292 self.assertIn("BytesIO", matches)
1251 1293 self.assertNotIn("BaseException", matches)
1252 1294
1253 1295 def test_snake_case_completion(self):
1254 1296 ip = get_ipython()
1255 1297 ip.Completer.use_jedi = False
1256 1298 ip.user_ns["some_three"] = 3
1257 1299 ip.user_ns["some_four"] = 4
1258 1300 _, matches = ip.complete("s_", "print(s_f")
1259 1301 self.assertIn("some_three", matches)
1260 1302 self.assertIn("some_four", matches)
1261 1303
    def test_mix_terms(self):
        """Kwarg completion matches the method actually being called,
        not sibling methods with similar names."""
        ip = get_ipython()
        from textwrap import dedent

        ip.Completer.use_jedi = False
        ip.ex(
            dedent(
                """
                class Test:
                    def meth(self, meth_arg1):
                        print("meth")

                    def meth_1(self, meth1_arg1, meth1_arg2):
                        print("meth1")

                    def meth_2(self, meth2_arg1, meth2_arg2):
                        print("meth2")
                test = Test()
                """
            )
        )
        _, matches = ip.complete(None, "test.meth(")
        self.assertIn("meth_arg1=", matches)
        self.assertNotIn("meth2_arg1=", matches)
1286 1328
1287 1329 def test_percent_symbol_restrict_to_magic_completions(self):
1288 1330 ip = get_ipython()
1289 1331 completer = ip.Completer
1290 1332 text = "%a"
1291 1333
1292 1334 with provisionalcompleter():
1293 1335 completer.use_jedi = True
1294 1336 completions = completer.completions(text, len(text))
1295 1337 for c in completions:
1296 1338 self.assertEqual(c.text[0], "%")
1297 1339
1298 1340 def test_fwd_unicode_restricts(self):
1299 1341 ip = get_ipython()
1300 1342 completer = ip.Completer
1301 1343 text = "\\ROMAN NUMERAL FIVE"
1302 1344
1303 1345 with provisionalcompleter():
1304 1346 completer.use_jedi = True
1305 1347 completions = [
1306 1348 completion.text for completion in completer.completions(text, len(text))
1307 1349 ]
1308 1350 self.assertEqual(completions, ["\u2164"])
1309 1351
    def test_dict_key_restrict_to_dicts(self):
        """Test that dict key suppresses non-dict completion items"""
        ip = get_ipython()
        c = ip.Completer
        d = {"abc": None}
        ip.user_ns["d"] = d

        text = 'd["a'

        def _():
            # collect plain completion texts for the fixed buffer
            with provisionalcompleter():
                c.use_jedi = True
                return [
                    completion.text for completion in c.completions(text, len(text))
                ]

        completions = _()
        # by default the dict-key matcher suppresses everything else
        self.assertEqual(completions, ["abc"])

        # check that it can be disabled in granular manner:
        cfg = Config()
        cfg.IPCompleter.suppress_competing_matchers = {
            "IPCompleter.dict_key_matcher": False
        }
        c.update_config(cfg)

        completions = _()
        self.assertIn("abc", completions)
        self.assertGreater(len(completions), 1)
1339 1381
    def test_matcher_suppression(self):
        """Matchers can suppress one another at run time via their results;
        explicit configuration overrides those run-time decisions."""
        @completion_matcher(identifier="a_matcher")
        def a_matcher(text):
            return ["completion_a"]

        @completion_matcher(identifier="b_matcher", api_version=2)
        def b_matcher(context: CompletionContext):
            text = context.token
            result = {"completions": [SimpleCompletion("completion_b")]}

            if text == "suppress c":
                result["suppress"] = {"c_matcher"}

            if text.startswith("suppress all"):
                result["suppress"] = True
                if text == "suppress all but c":
                    result["do_not_suppress"] = {"c_matcher"}
                if text == "suppress all but a":
                    result["do_not_suppress"] = {"a_matcher"}

            return result

        @completion_matcher(identifier="c_matcher")
        def c_matcher(text):
            return ["completion_c"]

        with custom_matchers([a_matcher, b_matcher, c_matcher]):
            ip = get_ipython()
            c = ip.Completer

            def _(text, expected):
                # complete `text` and assert the exact resulting match list
                c.use_jedi = False
                s, matches = c.complete(text)
                self.assertEqual(expected, matches)

            _("do not suppress", ["completion_a", "completion_b", "completion_c"])
            _("suppress all", ["completion_b"])
            _("suppress all but a", ["completion_a", "completion_b"])
            _("suppress all but c", ["completion_b", "completion_c"])

            def configure(suppression_config):
                # apply a suppress_competing_matchers configuration value
                cfg = Config()
                cfg.IPCompleter.suppress_competing_matchers = suppression_config
                c.update_config(cfg)

            # test that configuration takes priority over the run-time decisions

            configure(False)
            _("suppress all", ["completion_a", "completion_b", "completion_c"])

            configure({"b_matcher": False})
            _("suppress all", ["completion_a", "completion_b", "completion_c"])

            configure({"a_matcher": False})
            _("suppress all", ["completion_b"])

            configure({"b_matcher": True})
            _("do not suppress", ["completion_b"])

            configure(True)
            _("do not suppress", ["completion_a"])
1401 1443
1402 1444 def test_matcher_suppression_with_iterator(self):
1403 1445 @completion_matcher(identifier="matcher_returning_iterator")
1404 1446 def matcher_returning_iterator(text):
1405 1447 return iter(["completion_iter"])
1406 1448
1407 1449 @completion_matcher(identifier="matcher_returning_list")
1408 1450 def matcher_returning_list(text):
1409 1451 return ["completion_list"]
1410 1452
1411 1453 with custom_matchers([matcher_returning_iterator, matcher_returning_list]):
1412 1454 ip = get_ipython()
1413 1455 c = ip.Completer
1414 1456
1415 1457 def _(text, expected):
1416 1458 c.use_jedi = False
1417 1459 s, matches = c.complete(text)
1418 1460 self.assertEqual(expected, matches)
1419 1461
1420 1462 def configure(suppression_config):
1421 1463 cfg = Config()
1422 1464 cfg.IPCompleter.suppress_competing_matchers = suppression_config
1423 1465 c.update_config(cfg)
1424 1466
1425 1467 configure(False)
1426 1468 _("---", ["completion_iter", "completion_list"])
1427 1469
1428 1470 configure(True)
1429 1471 _("---", ["completion_iter"])
1430 1472
1431 1473 configure(None)
1432 1474 _("--", ["completion_iter", "completion_list"])
1433 1475
1434 1476 def test_matcher_suppression_with_jedi(self):
1435 1477 ip = get_ipython()
1436 1478 c = ip.Completer
1437 1479 c.use_jedi = True
1438 1480
1439 1481 def configure(suppression_config):
1440 1482 cfg = Config()
1441 1483 cfg.IPCompleter.suppress_competing_matchers = suppression_config
1442 1484 c.update_config(cfg)
1443 1485
1444 1486 def _():
1445 1487 with provisionalcompleter():
1446 1488 matches = [completion.text for completion in c.completions("dict.", 5)]
1447 1489 self.assertIn("keys", matches)
1448 1490
1449 1491 configure(False)
1450 1492 _()
1451 1493
1452 1494 configure(True)
1453 1495 _()
1454 1496
1455 1497 configure(None)
1456 1498 _()
1457 1499
1458 1500 def test_matcher_disabling(self):
1459 1501 @completion_matcher(identifier="a_matcher")
1460 1502 def a_matcher(text):
1461 1503 return ["completion_a"]
1462 1504
1463 1505 @completion_matcher(identifier="b_matcher")
1464 1506 def b_matcher(text):
1465 1507 return ["completion_b"]
1466 1508
1467 1509 def _(expected):
1468 1510 s, matches = c.complete("completion_")
1469 1511 self.assertEqual(expected, matches)
1470 1512
1471 1513 with custom_matchers([a_matcher, b_matcher]):
1472 1514 ip = get_ipython()
1473 1515 c = ip.Completer
1474 1516
1475 1517 _(["completion_a", "completion_b"])
1476 1518
1477 1519 cfg = Config()
1478 1520 cfg.IPCompleter.disable_matchers = ["b_matcher"]
1479 1521 c.update_config(cfg)
1480 1522
1481 1523 _(["completion_a"])
1482 1524
1483 1525 cfg.IPCompleter.disable_matchers = []
1484 1526 c.update_config(cfg)
1485 1527
1486 1528 def test_matcher_priority(self):
1487 1529 @completion_matcher(identifier="a_matcher", priority=0, api_version=2)
1488 1530 def a_matcher(text):
1489 1531 return {"completions": [SimpleCompletion("completion_a")], "suppress": True}
1490 1532
1491 1533 @completion_matcher(identifier="b_matcher", priority=2, api_version=2)
1492 1534 def b_matcher(text):
1493 1535 return {"completions": [SimpleCompletion("completion_b")], "suppress": True}
1494 1536
1495 1537 def _(expected):
1496 1538 s, matches = c.complete("completion_")
1497 1539 self.assertEqual(expected, matches)
1498 1540
1499 1541 with custom_matchers([a_matcher, b_matcher]):
1500 1542 ip = get_ipython()
1501 1543 c = ip.Completer
1502 1544
1503 1545 _(["completion_b"])
1504 1546 a_matcher.matcher_priority = 3
1505 1547 _(["completion_a"])
General Comments 0
You need to be logged in to leave comments. Login now