##// END OF EJS Templates
Merge branch 'main' into shaperilio/autoreload-verbosity
Emilio Graff -
r27969:8e3376e9 merge
parent child Browse files
Show More
This diff has been collapsed as it changes many lines, (738 lines changed) Show them Hide them
@@ -0,0 +1,738 b''
1 from typing import (
2 Any,
3 Callable,
4 Dict,
5 Set,
6 Sequence,
7 Tuple,
8 NamedTuple,
9 Type,
10 Literal,
11 Union,
12 TYPE_CHECKING,
13 )
14 import ast
15 import builtins
16 import collections
17 import operator
18 import sys
19 from functools import cached_property
20 from dataclasses import dataclass, field
21
22 from IPython.utils.docs import GENERATING_DOCUMENTATION
23 from IPython.utils.decorators import undoc
24
25
26 if TYPE_CHECKING or GENERATING_DOCUMENTATION:
27 from typing_extensions import Protocol
28 else:
29 # do not require on runtime
30 Protocol = object # requires Python >=3.8
31
32
@undoc
class HasGetItem(Protocol):
    """Structural type for objects that support subscripting (``obj[key]``)."""

    def __getitem__(self, key) -> None:
        ...
37
38
@undoc
class InstancesHaveGetItem(Protocol):
    """Structural type for callables whose return value supports subscripting."""

    def __call__(self, *args, **kwargs) -> HasGetItem:
        ...
43
44
@undoc
class HasGetAttr(Protocol):
    """Structural type for objects that define a custom ``__getattr__``."""

    def __getattr__(self, key) -> None:
        ...
49
50
@undoc
class DoesNotHaveGetAttr(Protocol):
    """Structural type for objects without a custom ``__getattr__``."""

    pass
54
55
# By default `__getattr__` is not explicitly implemented on most objects,
# so either structural variant is acceptable where this alias is used.
MayHaveGetattr = Union[HasGetAttr, DoesNotHaveGetAttr]
58
59
60 def _unbind_method(func: Callable) -> Union[Callable, None]:
61 """Get unbound method for given bound method.
62
63 Returns None if cannot get unbound method, or method is already unbound.
64 """
65 owner = getattr(func, "__self__", None)
66 owner_class = type(owner)
67 name = getattr(func, "__name__", None)
68 instance_dict_overrides = getattr(owner, "__dict__", None)
69 if (
70 owner is not None
71 and name
72 and (
73 not instance_dict_overrides
74 or (instance_dict_overrides and name not in instance_dict_overrides)
75 )
76 ):
77 return getattr(owner_class, name)
78 return None
79
80
@undoc
@dataclass
class EvaluationPolicy:
    """Definition of evaluation policy.

    The defaults deny everything; each flag opts a category of operations
    back in.  The ``can_*`` predicates are consulted by :func:`eval_node`
    before performing the corresponding operation.
    """

    allow_locals_access: bool = False
    allow_globals_access: bool = False
    allow_item_access: bool = False
    allow_attr_access: bool = False
    allow_builtins_access: bool = False
    allow_all_operations: bool = False
    allow_any_calls: bool = False
    allowed_calls: Set[Callable] = field(default_factory=set)

    def can_get_item(self, value, item):
        """Return True if subscripting `value` with `item` is permitted."""
        return self.allow_item_access

    def can_get_attr(self, value, attr):
        """Return True if reading attribute `attr` of `value` is permitted."""
        return self.allow_attr_access

    def can_operate(self, dunders: Tuple[str, ...], a, b=None):
        """Return True if the operator implemented by `dunders` may run on the operands."""
        if self.allow_all_operations:
            return True
        # Previously fell through returning an implicit None;
        # make the rejection explicit (callers only test truthiness).
        return False

    def can_call(self, func):
        """Return True if calling `func` is permitted."""
        if self.allow_any_calls:
            return True

        if func in self.allowed_calls:
            return True

        # A bound method is allowed when its underlying class-level
        # function is on the allow list.
        owner_method = _unbind_method(func)

        if owner_method and owner_method in self.allowed_calls:
            return True

        # Previously fell through returning an implicit None.
        return False
117
118 def _get_external(module_name: str, access_path: Sequence[str]):
119 """Get value from external module given a dotted access path.
120
121 Raises:
122 * `KeyError` if module is removed not found, and
123 * `AttributeError` if acess path does not match an exported object
124 """
125 member_type = sys.modules[module_name]
126 for attr in access_path:
127 member_type = getattr(member_type, attr)
128 return member_type
129
130
def _has_original_dunder_external(
    value,
    module_name: str,
    access_path: Sequence[str],
    method_name: str,
):
    """Return True if `value` is (or subclasses without modification) the
    externally allow-listed type at `module_name` + `access_path`, with
    respect to `method_name`; False otherwise.
    """
    if module_name not in sys.modules:
        # LBYLB as it is faster
        return False
    try:
        member_type = _get_external(module_name, access_path)
        value_type = type(value)
        # reuse the computed `value_type` instead of calling `type` twice
        if value_type == member_type:
            return True
        if method_name == "__getattribute__":
            # we have to short-circuit here due to an unresolved issue in
            # `isinstance` implementation: https://bugs.python.org/issue32683
            return False
        if isinstance(value, member_type):
            method = getattr(value_type, method_name, None)
            member_method = getattr(member_type, method_name, None)
            if member_method == method:
                return True
    except (AttributeError, KeyError):
        return False
    # Previously fell through returning an implicit None; make the
    # negative result explicit (callers only test truthiness).
    return False
156
157
def _has_original_dunder(
    value, allowed_types, allowed_methods, allowed_external, method_name
):
    """Check whether `value`'s class provides an unmodified `method_name`.

    Tri-state result — callers (see ``SelectivePolicy.can_get_attr``)
    distinguish all three:
    * ``True``  — type allow-listed, or the method matches an allowed one;
    * ``None``  — the type does not define `method_name` at all;
    * ``False`` — the method exists but is not recognised as original.
    """
    # note: Python ignores `__getattr__`/`__getitem__` on instances,
    # we only need to check at class level
    value_type = type(value)

    # strict type check passes → no need to check method
    if value_type in allowed_types:
        return True

    method = getattr(value_type, method_name, None)

    if method is None:
        return None

    if method in allowed_methods:
        return True

    # fall back to externally allow-listed types (pandas/numpy etc.)
    for module_name, *access_path in allowed_external:
        if _has_original_dunder_external(value, module_name, access_path, method_name):
            return True

    return False
182
183
@undoc
@dataclass
class SelectivePolicy(EvaluationPolicy):
    """Policy permitting item/attribute access and operators only for
    allow-listed types (or their unmodified subclasses).

    The ``*_external`` sets name types by dotted import path, so that
    third-party packages need not be imported for the allow list to exist.
    """

    allowed_getitem: Set[InstancesHaveGetItem] = field(default_factory=set)
    allowed_getitem_external: Set[Tuple[str, ...]] = field(default_factory=set)

    allowed_getattr: Set[MayHaveGetattr] = field(default_factory=set)
    allowed_getattr_external: Set[Tuple[str, ...]] = field(default_factory=set)

    allowed_operations: Set = field(default_factory=set)
    allowed_operations_external: Set[Tuple[str, ...]] = field(default_factory=set)

    # per-dunder cache used by `_operator_dunder_methods`; excluded from
    # the generated `__init__`
    _operation_methods_cache: Dict[str, Set[Callable]] = field(
        default_factory=dict, init=False
    )

    def can_get_attr(self, value, attr):
        """Allow attribute access when neither `__getattribute__` nor
        `__getattr__` was modified and `attr` is not an overridden property."""
        has_original_attribute = _has_original_dunder(
            value,
            allowed_types=self.allowed_getattr,
            allowed_methods=self._getattribute_methods,
            allowed_external=self.allowed_getattr_external,
            method_name="__getattribute__",
        )
        has_original_attr = _has_original_dunder(
            value,
            allowed_types=self.allowed_getattr,
            allowed_methods=self._getattr_methods,
            allowed_external=self.allowed_getattr_external,
            method_name="__getattr__",
        )

        accept = False

        # Many objects do not have `__getattr__`, this is fine.
        if has_original_attr is None and has_original_attribute:
            accept = True
        else:
            # Accept objects without modifications to `__getattr__` and `__getattribute__`
            accept = has_original_attr and has_original_attribute

        if accept:
            # We still need to check for overridden properties.

            value_class = type(value)
            if not hasattr(value_class, attr):
                return True

            class_attr_val = getattr(value_class, attr)
            is_property = isinstance(class_attr_val, property)

            if not is_property:
                return True

            # Properties in allowed types are ok (although we do not include any
            # properties in our default allow list currently).
            if type(value) in self.allowed_getattr:
                return True  # pragma: no cover

            # Properties in subclasses of allowed types may be ok if not changed
            # NOTE(review): only the first entry of `allowed_getattr_external`
            # is ever inspected because of the `return` inside the loop —
            # verify this is intended if the set grows beyond a few entries.
            for module_name, *access_path in self.allowed_getattr_external:
                try:
                    external_class = _get_external(module_name, access_path)
                    external_class_attr_val = getattr(external_class, attr)
                except (KeyError, AttributeError):
                    return False  # pragma: no cover
                return class_attr_val == external_class_attr_val

        return False

    def can_get_item(self, value, item):
        """Allow accessing `__getitem__` of allow-listed instances if it was not modified."""
        return _has_original_dunder(
            value,
            allowed_types=self.allowed_getitem,
            allowed_methods=self._getitem_methods,
            allowed_external=self.allowed_getitem_external,
            method_name="__getitem__",
        )

    def can_operate(self, dunders: Tuple[str, ...], a, b=None):
        """Allow an operator only if every dunder is unmodified on every operand."""
        objects = [a]
        if b is not None:
            objects.append(b)
        return all(
            [
                _has_original_dunder(
                    obj,
                    allowed_types=self.allowed_operations,
                    allowed_methods=self._operator_dunder_methods(dunder),
                    allowed_external=self.allowed_operations_external,
                    method_name=dunder,
                )
                for dunder in dunders
                for obj in objects
            ]
        )

    def _operator_dunder_methods(self, dunder: str) -> Set[Callable]:
        # Collecting methods over the allow list is invariant per dunder,
        # so compute once and cache.
        if dunder not in self._operation_methods_cache:
            self._operation_methods_cache[dunder] = self._safe_get_methods(
                self.allowed_operations, dunder
            )
        return self._operation_methods_cache[dunder]

    @cached_property
    def _getitem_methods(self) -> Set[Callable]:
        return self._safe_get_methods(self.allowed_getitem, "__getitem__")

    @cached_property
    def _getattr_methods(self) -> Set[Callable]:
        return self._safe_get_methods(self.allowed_getattr, "__getattr__")

    @cached_property
    def _getattribute_methods(self) -> Set[Callable]:
        return self._safe_get_methods(self.allowed_getattr, "__getattribute__")

    def _safe_get_methods(self, classes, name) -> Set[Callable]:
        """Collect the `name` method from each class, skipping classes lacking it."""
        return {
            method
            for class_ in classes
            for method in [getattr(class_, name, None)]
            if method
        }
308
309
class _DummyNamedTuple(NamedTuple):
    """Used internally to retrieve methods of named tuple instance."""

    # no fields — only the methods inherited from NamedTuple matter
313
class EvaluationContext(NamedTuple):
    """Input to :func:`guarded_eval`: namespaces plus the policy selector."""

    #: Local namespace
    locals: dict
    #: Global namespace
    globals: dict
    #: Evaluation policy identifier
    evaluation: Literal[
        "forbidden", "minimal", "limited", "unsafe", "dangerous"
    ] = "forbidden"
    #: Whether the evaluation of code takes place inside of a subscript.
    #: Useful for evaluating ``:-1, 'col'`` in ``df[:-1, 'col']``.
    in_subscript: bool = False
326
327
class _IdentitySubscript:
    """Returns the key itself when item is requested via subscript."""

    def __getitem__(self, key):
        # pass-through: `obj[x]` evaluates to `x`
        return key
333
334
# Singleton used by `guarded_eval` to make the wrapping subscript a no-op,
# injected into locals under SUBSCRIPT_MARKER (see the in_subscript branch).
IDENTITY_SUBSCRIPT = _IdentitySubscript()
SUBSCRIPT_MARKER = "__SUBSCRIPT_SENTINEL__"
337
338
class GuardRejection(Exception):
    """Exception raised when guard rejects evaluation attempt."""
343
344
def guarded_eval(code: str, context: EvaluationContext):
    """Evaluate provided code in the evaluation context.

    If evaluation policy given by context is set to ``forbidden``
    no evaluation will be performed; if it is set to ``dangerous``
    standard :func:`eval` will be used; finally, for any other,
    policy :func:`eval_node` will be called on parsed AST.
    """
    if context.evaluation == "forbidden":
        raise GuardRejection("Forbidden mode")

    # `ast.literal_eval` is not used here because it does not implement
    # getitem at all; it fails even on a simple `[0][1]`.

    if context.in_subscript:
        # The `:` slice sugar is only valid inside a subscript, so trick
        # the parser by wrapping the code in a sentinel subscript; the
        # identity subscript makes the wrapping `__getitem__` a no-op that
        # can be recognised and ignored later.
        if not code:
            return tuple()
        namespace = {**context.locals, SUBSCRIPT_MARKER: IDENTITY_SUBSCRIPT}
        code = SUBSCRIPT_MARKER + "[" + code + "]"
        context = context._replace(locals=namespace)

    if context.evaluation == "dangerous":
        return eval(code, context.globals, context.locals)

    return eval_node(ast.parse(code, mode="eval"), context)
379
380
# Mapping from AST operator nodes to the dunder method(s) implementing them;
# every listed dunder must pass the policy check before evaluation.
BINARY_OP_DUNDERS: Dict[Type[ast.operator], Tuple[str, ...]] = {
    ast.Add: ("__add__",),
    ast.Sub: ("__sub__",),
    ast.Mult: ("__mul__",),
    ast.Div: ("__truediv__",),
    ast.FloorDiv: ("__floordiv__",),
    ast.Mod: ("__mod__",),
    ast.Pow: ("__pow__",),
    ast.LShift: ("__lshift__",),
    ast.RShift: ("__rshift__",),
    ast.BitOr: ("__or__",),
    ast.BitXor: ("__xor__",),
    ast.BitAnd: ("__and__",),
    ast.MatMult: ("__matmul__",),
}

# For comparisons the reflected dunder is listed too, since either
# operand's implementation may end up being invoked.
COMP_OP_DUNDERS: Dict[Type[ast.cmpop], Tuple[str, ...]] = {
    ast.Eq: ("__eq__",),
    ast.NotEq: ("__ne__", "__eq__"),
    ast.Lt: ("__lt__", "__gt__"),
    ast.LtE: ("__le__", "__ge__"),
    ast.Gt: ("__gt__", "__lt__"),
    ast.GtE: ("__ge__", "__le__"),
    ast.In: ("__contains__",),
    # Note: ast.Is, ast.IsNot, ast.NotIn are handled specially
}

UNARY_OP_DUNDERS: Dict[Type[ast.unaryop], Tuple[str, ...]] = {
    ast.USub: ("__neg__",),
    ast.UAdd: ("__pos__",),
    # we have to check both __inv__ and __invert__!
    ast.Invert: ("__invert__", "__inv__"),
    ast.Not: ("__not__",),
}
415
416
417 def _find_dunder(node_op, dunders) -> Union[Tuple[str, ...], None]:
418 dunder = None
419 for op, candidate_dunder in dunders.items():
420 if isinstance(node_op, op):
421 dunder = candidate_dunder
422 return dunder
423
424
def eval_node(node: Union[ast.AST, None], context: EvaluationContext):
    """Evaluate AST node in provided context.

    Applies evaluation restrictions defined in the context. Currently does not support evaluation of functions with keyword arguments.

    Does not evaluate actions that always have side effects:

    - class definitions (``class sth: ...``)
    - function definitions (``def sth: ...``)
    - variable assignments (``x = 1``)
    - augmented assignments (``x += 1``)
    - deletions (``del x``)

    Does not evaluate operations which do not return values:

    - assertions (``assert x``)
    - pass (``pass``)
    - imports (``import x``)
    - control flow:

      - conditionals (``if x:``) except for ternary IfExp (``a if x else b``)
      - loops (``for`` and `while``)
      - exception handling

    The purpose of this function is to guard against unwanted side-effects;
    it does not give guarantees on protection from malicious code execution.
    """
    policy = EVALUATION_POLICIES[context.evaluation]
    if node is None:
        return None
    if isinstance(node, ast.Expression):
        return eval_node(node.body, context)
    if isinstance(node, ast.BinOp):
        left = eval_node(node.left, context)
        right = eval_node(node.right, context)
        dunders = _find_dunder(node.op, BINARY_OP_DUNDERS)
        if dunders:
            if policy.can_operate(dunders, left, right):
                return getattr(left, dunders[0])(right)
            else:
                raise GuardRejection(
                    f"Operation (`{dunders}`) for",
                    type(left),
                    f"not allowed in {context.evaluation} mode",
                )
    if isinstance(node, ast.Compare):
        left = eval_node(node.left, context)
        all_true = True
        for op, right in zip(node.ops, node.comparators):
            right = eval_node(right, context)
            dunder = None
            # Bug fix: `negate` must be reset for every operator of a
            # chained comparison; previously it was set once before the
            # loop, so a leading `not in`/`is not` leaked into subsequent
            # comparators (e.g. `0 not in [1] in [[1]]` evaluated False).
            negate = False
            dunders = _find_dunder(op, COMP_OP_DUNDERS)
            if not dunders:
                # operators without a (sensible) dunder: implemented via
                # the `in` dunder and/or the `operator.is_` helper
                if isinstance(op, ast.NotIn):
                    dunders = COMP_OP_DUNDERS[ast.In]
                    negate = True
                if isinstance(op, ast.Is):
                    dunder = "is_"
                if isinstance(op, ast.IsNot):
                    dunder = "is_"
                    negate = True
            if not dunder and dunders:
                dunder = dunders[0]
            if dunder:
                # `x in y` is implemented as `y.__contains__(x)` — swap operands
                a, b = (right, left) if dunder == "__contains__" else (left, right)
                if dunder == "is_" or dunders and policy.can_operate(dunders, a, b):
                    result = getattr(operator, dunder)(a, b)
                    if negate:
                        result = not result
                    if not result:
                        all_true = False
                    # chained comparison: current right becomes next left
                    left = right
                else:
                    raise GuardRejection(
                        f"Comparison (`{dunder}`) for",
                        type(left),
                        f"not allowed in {context.evaluation} mode",
                    )
            else:
                raise ValueError(
                    f"Comparison `{dunder}` not supported"
                )  # pragma: no cover
        return all_true
    if isinstance(node, ast.Constant):
        return node.value
    if isinstance(node, ast.Index):
        # deprecated since Python 3.9
        return eval_node(node.value, context)  # pragma: no cover
    if isinstance(node, ast.Tuple):
        return tuple(eval_node(e, context) for e in node.elts)
    if isinstance(node, ast.List):
        return [eval_node(e, context) for e in node.elts]
    if isinstance(node, ast.Set):
        return {eval_node(e, context) for e in node.elts}
    if isinstance(node, ast.Dict):
        return dict(
            zip(
                [eval_node(k, context) for k in node.keys],
                [eval_node(v, context) for v in node.values],
            )
        )
    if isinstance(node, ast.Slice):
        return slice(
            eval_node(node.lower, context),
            eval_node(node.upper, context),
            eval_node(node.step, context),
        )
    if isinstance(node, ast.ExtSlice):
        # deprecated since Python 3.9
        return tuple([eval_node(dim, context) for dim in node.dims])  # pragma: no cover
    if isinstance(node, ast.UnaryOp):
        value = eval_node(node.operand, context)
        dunders = _find_dunder(node.op, UNARY_OP_DUNDERS)
        if dunders:
            if policy.can_operate(dunders, value):
                return getattr(value, dunders[0])()
            else:
                raise GuardRejection(
                    f"Operation (`{dunders}`) for",
                    type(value),
                    f"not allowed in {context.evaluation} mode",
                )
    if isinstance(node, ast.Subscript):
        value = eval_node(node.value, context)
        slice_ = eval_node(node.slice, context)
        if policy.can_get_item(value, slice_):
            return value[slice_]
        raise GuardRejection(
            "Subscript access (`__getitem__`) for",
            type(value),  # not joined to avoid calling `repr`
            f" not allowed in {context.evaluation} mode",
        )
    if isinstance(node, ast.Name):
        if policy.allow_locals_access and node.id in context.locals:
            return context.locals[node.id]
        if policy.allow_globals_access and node.id in context.globals:
            return context.globals[node.id]
        if policy.allow_builtins_access and hasattr(builtins, node.id):
            # note: do not use __builtins__, it is implementation detail of cPython
            return getattr(builtins, node.id)
        if not policy.allow_globals_access and not policy.allow_locals_access:
            raise GuardRejection(
                f"Namespace access not allowed in {context.evaluation} mode"
            )
        else:
            raise NameError(f"{node.id} not found in locals, globals, nor builtins")
    if isinstance(node, ast.Attribute):
        value = eval_node(node.value, context)
        if policy.can_get_attr(value, node.attr):
            return getattr(value, node.attr)
        raise GuardRejection(
            "Attribute access (`__getattr__`) for",
            type(value),  # not joined to avoid calling `repr`
            f"not allowed in {context.evaluation} mode",
        )
    if isinstance(node, ast.IfExp):
        test = eval_node(node.test, context)
        if test:
            return eval_node(node.body, context)
        else:
            return eval_node(node.orelse, context)
    if isinstance(node, ast.Call):
        func = eval_node(node.func, context)
        if policy.can_call(func) and not node.keywords:
            args = [eval_node(arg, context) for arg in node.args]
            return func(*args)
        raise GuardRejection(
            "Call for",
            func,  # not joined to avoid calling `repr`
            f"not allowed in {context.evaluation} mode",
        )
    raise ValueError("Unhandled node", ast.dump(node))
598
599
# Third-party types whose `__getitem__` is trusted, named by dotted import
# path so the packages need not be installed or imported up-front.
SUPPORTED_EXTERNAL_GETITEM = {
    ("pandas", "core", "indexing", "_iLocIndexer"),
    ("pandas", "core", "indexing", "_LocIndexer"),
    ("pandas", "DataFrame"),
    ("pandas", "Series"),
    ("numpy", "ndarray"),
    ("numpy", "void"),
}


# Builtin and stdlib container types whose `__getitem__` is trusted.
BUILTIN_GETITEM: Set[InstancesHaveGetItem] = {
    dict,
    str,
    bytes,
    list,
    tuple,
    collections.defaultdict,
    collections.deque,
    collections.OrderedDict,
    collections.ChainMap,
    collections.UserDict,
    collections.UserList,
    collections.UserString,
    _DummyNamedTuple,
    _IdentitySubscript,
}
626
627
628 def _list_methods(cls, source=None):
629 """For use on immutable objects or with methods returning a copy"""
630 return [getattr(cls, k) for k in (source if source else dir(cls))]
631
632
# Method names that do not mutate their owner.
dict_non_mutating_methods = ("copy", "keys", "values", "items")
list_non_mutating_methods = ("copy", "index", "count")
# Any method shared with `frozenset` cannot mutate a set.
set_non_mutating_methods = set(dir(set)) & set(dir(frozenset))


# `dict_keys` view type is not exposed in `builtins`; obtain it structurally.
dict_keys: Type[collections.abc.KeysView] = type({}.keys())
# Type of C-implemented methods such as `list.copy`.
method_descriptor: Any = type(list.copy)

NUMERICS = {int, float, complex}

# Callables permitted in `limited` mode: builtin constructors plus methods
# considered non-mutating (or returning a copy).
ALLOWED_CALLS = {
    bytes,
    *_list_methods(bytes),
    dict,
    *_list_methods(dict, dict_non_mutating_methods),
    dict_keys.isdisjoint,
    list,
    *_list_methods(list, list_non_mutating_methods),
    set,
    *_list_methods(set, set_non_mutating_methods),
    frozenset,
    *_list_methods(frozenset),
    range,
    str,
    *_list_methods(str),
    tuple,
    *_list_methods(tuple),
    *NUMERICS,
    *[method for numeric_cls in NUMERICS for method in _list_methods(numeric_cls)],
    collections.deque,
    *_list_methods(collections.deque, list_non_mutating_methods),
    collections.defaultdict,
    *_list_methods(collections.defaultdict, dict_non_mutating_methods),
    collections.OrderedDict,
    *_list_methods(collections.OrderedDict, dict_non_mutating_methods),
    collections.UserDict,
    *_list_methods(collections.UserDict, dict_non_mutating_methods),
    collections.UserList,
    *_list_methods(collections.UserList, list_non_mutating_methods),
    collections.UserString,
    *_list_methods(collections.UserString, dir(str)),
    collections.Counter,
    *_list_methods(collections.Counter, dict_non_mutating_methods),
    collections.Counter.elements,
    collections.Counter.most_common,
}

# Types whose attribute access (`__getattr__`/`__getattribute__`) is trusted.
BUILTIN_GETATTR: Set[MayHaveGetattr] = {
    *BUILTIN_GETITEM,
    set,
    frozenset,
    object,
    type,  # `type` handles a lot of generic cases, e.g. numbers as in `int.real`.
    *NUMERICS,
    dict_keys,
    method_descriptor,
}


# Operators are trusted for the same set of types as attribute access.
BUILTIN_OPERATIONS = {*BUILTIN_GETATTR}
693
# Registry consulted by `eval_node` via `context.evaluation`.
# NOTE(review): there is no "dangerous" entry here; `guarded_eval` handles
# that mode with plain `eval` before reaching this mapping, but calling
# `eval_node` directly with a "dangerous" context would raise `KeyError` —
# confirm that is intended.
EVALUATION_POLICIES = {
    "minimal": EvaluationPolicy(
        allow_builtins_access=True,
        allow_locals_access=False,
        allow_globals_access=False,
        allow_item_access=False,
        allow_attr_access=False,
        allowed_calls=set(),
        allow_any_calls=False,
        allow_all_operations=False,
    ),
    "limited": SelectivePolicy(
        allowed_getitem=BUILTIN_GETITEM,
        allowed_getitem_external=SUPPORTED_EXTERNAL_GETITEM,
        allowed_getattr=BUILTIN_GETATTR,
        allowed_getattr_external={
            # pandas Series/Frame implements custom `__getattr__`
            ("pandas", "DataFrame"),
            ("pandas", "Series"),
        },
        allowed_operations=BUILTIN_OPERATIONS,
        allow_builtins_access=True,
        allow_locals_access=True,
        allow_globals_access=True,
        allowed_calls=ALLOWED_CALLS,
    ),
    "unsafe": EvaluationPolicy(
        allow_builtins_access=True,
        allow_locals_access=True,
        allow_globals_access=True,
        allow_attr_access=True,
        allow_item_access=True,
        allow_any_calls=True,
        allow_all_operations=True,
    ),
}


__all__ = [
    "guarded_eval",
    "eval_node",
    "GuardRejection",
    "EvaluationContext",
    "_unbind_method",
]
This diff has been collapsed as it changes many lines, (570 lines changed) Show them Hide them
@@ -0,0 +1,570 b''
1 from contextlib import contextmanager
2 from typing import NamedTuple
3 from functools import partial
4 from IPython.core.guarded_eval import (
5 EvaluationContext,
6 GuardRejection,
7 guarded_eval,
8 _unbind_method,
9 )
10 from IPython.testing import decorators as dec
11 import pytest
12
13
def create_context(evaluation: str, **kwargs):
    """Build an `EvaluationContext` for the named policy; keyword arguments
    become the local namespace."""
    return EvaluationContext(locals=kwargs, globals={}, evaluation=evaluation)
16
17
# Shorthand context factories, one per evaluation policy.
forbidden = partial(create_context, "forbidden")
minimal = partial(create_context, "minimal")
limited = partial(create_context, "limited")
unsafe = partial(create_context, "unsafe")
dangerous = partial(create_context, "dangerous")

# Policy groups used to parametrise tests over increasingly permissive modes.
LIMITED_OR_HIGHER = [limited, unsafe, dangerous]
MINIMAL_OR_HIGHER = [minimal, *LIMITED_OR_HIGHER]
26
27
@contextmanager
def module_not_installed(module: str):
    """Temporarily hide `module` from `sys.modules` to simulate its absence.

    Bug fix: previously, when the module was not present to begin with,
    the `finally` clause inserted ``None`` into ``sys.modules`` — which
    makes any later ``import`` of that name raise ``ImportError``.  Now the
    entry is restored only if it existed, and removed otherwise.
    """
    import sys

    try:
        to_restore = sys.modules[module]
        del sys.modules[module]
    except KeyError:
        to_restore = None
    try:
        yield
    finally:
        if to_restore is not None:
            sys.modules[module] = to_restore
        else:
            # the module was absent before entering; leave it absent
            sys.modules.pop(module, None)
41
42
def test_external_not_installed():
    """
    Because attribute check requires checking if object is not of allowed
    external type, this tests logic for absence of external module.
    """

    class Custom:
        def __init__(self):
            self.test = 1

        def __getattr__(self, key):
            return key

    # `Custom` overrides `__getattr__`, so it could only be accepted via the
    # external (pandas) allow list — which is hidden here, forcing rejection.
    with module_not_installed("pandas"):
        context = limited(x=Custom())
        with pytest.raises(GuardRejection):
            guarded_eval("x.test", context)
60
61
@dec.skip_without("pandas")
def test_external_changed_api(monkeypatch):
    """Check that the execution rejects if external API changed paths"""
    import pandas as pd

    series = pd.Series([1], index=["a"])

    # Removing `pandas.Series` invalidates the dotted allow-list path,
    # so the previously-allowed access must now be rejected.
    with monkeypatch.context() as m:
        m.delattr(pd, "Series")
        context = limited(data=series)
        with pytest.raises(GuardRejection):
            guarded_eval("data.iloc[0]", context)
74
75
@dec.skip_without("pandas")
def test_pandas_series_iloc():
    """The `iloc` indexer of an unmodified Series is allowed in `limited` mode."""
    import pandas as pd

    series = pd.Series([1], index=["a"])
    context = limited(data=series)
    assert guarded_eval("data.iloc[0]", context) == 1
83
84
def test_rejects_custom_properties():
    """A property on a non-allow-listed class must not be evaluated."""

    class BadProperty:
        @property
        def iloc(self):
            return [None]

    series = BadProperty()
    context = limited(data=series)

    with pytest.raises(GuardRejection):
        guarded_eval("data.iloc[0]", context)
96
97
@dec.skip_without("pandas")
def test_accepts_non_overriden_properties():
    """A subclass that inherits `iloc` unchanged from Series stays allowed."""
    import pandas as pd

    class GoodProperty(pd.Series):
        pass

    series = GoodProperty([1], index=["a"])
    context = limited(data=series)

    assert guarded_eval("data.iloc[0]", context) == 1
109
110
@dec.skip_without("pandas")
def test_pandas_series():
    """Series item access is allowed; the guard lets pandas' own KeyError through."""
    import pandas as pd

    context = limited(data=pd.Series([1], index=["a"]))
    assert guarded_eval('data["a"]', context) == 1
    with pytest.raises(KeyError):
        guarded_eval('data["c"]', context)
119
120
@dec.skip_without("pandas")
def test_pandas_bad_series():
    """Series subclasses overriding `__getitem__`/`__getattr__` are rejected
    in `limited` mode but still usable in `unsafe` mode."""
    import pandas as pd

    class BadItemSeries(pd.Series):
        def __getitem__(self, key):
            return "CUSTOM_ITEM"

    class BadAttrSeries(pd.Series):
        def __getattr__(self, key):
            return "CUSTOM_ATTR"

    bad_series = BadItemSeries([1], index=["a"])
    context = limited(data=bad_series)

    with pytest.raises(GuardRejection):
        guarded_eval('data["a"]', context)
    with pytest.raises(GuardRejection):
        guarded_eval('data["c"]', context)

    # note: here result is a bit unexpected because
    # pandas `__getattr__` calls `__getitem__`;
    # FIXME - special case to handle it?
    assert guarded_eval("data.a", context) == "CUSTOM_ITEM"

    context = unsafe(data=bad_series)
    assert guarded_eval('data["a"]', context) == "CUSTOM_ITEM"

    bad_attr_series = BadAttrSeries([1], index=["a"])
    context = limited(data=bad_attr_series)
    assert guarded_eval('data["a"]', context) == 1
    with pytest.raises(GuardRejection):
        guarded_eval("data.a", context)
154
155
@dec.skip_without("pandas")
def test_pandas_dataframe_loc():
    """DataFrame `.loc` indexing (an externally allow-listed indexer) works."""
    import pandas as pd
    from pandas.testing import assert_series_equal

    data = pd.DataFrame([{"a": 1}])
    context = limited(data=data)
    assert_series_equal(guarded_eval('data.loc[:, "a"]', context), data["a"])
164
165
def test_named_tuple():
    """Plain NamedTuple subclasses may be subscripted; a subclass that
    overrides `__getitem__` must be rejected."""

    class GoodNamedTuple(NamedTuple):
        a: str

    class BadNamedTuple(NamedTuple):
        a: str

        def __getitem__(self, key):
            return None

    context = limited(data=GoodNamedTuple(a="x"))
    assert guarded_eval("data[0]", context) == "x"

    context = limited(data=BadNamedTuple(a="x"))
    with pytest.raises(GuardRejection):
        guarded_eval("data[0]", context)
186
187
def test_dict():
    # Plain dict: item access (str and tuple keys, nesting) and access to
    # the non-mutating `keys` method are permitted in `limited` mode.
    context = limited(data={"a": 1, "b": {"x": 2}, ("x", "y"): 3})
    assert guarded_eval('data["a"]', context) == 1
    assert guarded_eval('data["b"]', context) == {"x": 2}
    assert guarded_eval('data["b"]["x"]', context) == 2
    assert guarded_eval('data["x", "y"]', context) == 3

    assert guarded_eval("data.keys", context)
196
197
def test_set():
    # Non-mutating set methods (shared with frozenset) are accessible.
    context = limited(data={"a", "b"})
    assert guarded_eval("data.difference", context)
201
202
def test_list():
    # List subscripting and the non-mutating `copy` method are allowed.
    context = limited(data=[1, 2, 3])
    assert guarded_eval("data[1]", context) == 2
    assert guarded_eval("data.copy", context)
207
208
def test_dict_literal():
    # Dict literals evaluate without touching any namespace.
    context = limited()
    assert guarded_eval("{}", context) == {}
    assert guarded_eval('{"a": 1}', context) == {"a": 1}
213
214
def test_list_literal():
    # List literals evaluate without touching any namespace.
    context = limited()
    assert guarded_eval("[]", context) == []
    assert guarded_eval('[1, "a"]', context) == [1, "a"]
219
220
def test_set_literal():
    # Set literals and the allow-listed `set` constructor both evaluate.
    context = limited()
    assert guarded_eval("set()", context) == set()
    assert guarded_eval('{"a"}', context) == {"a"}
225
226
def test_evaluates_if_expression():
    # Ternary IfExp is evaluated (unlike `if` statements, which never are).
    context = limited()
    assert guarded_eval("2 if True else 3", context) == 2
    assert guarded_eval("4 if False else 5", context) == 5
231
232
def test_object():
    # Bare `object` instances are allow-listed for attribute access.
    obj = object()
    context = limited(obj=obj)
    assert guarded_eval("obj.__dir__", context) == obj.__dir__
237
238
@pytest.mark.parametrize(
    "code,expected",
    [
        ["int.numerator", int.numerator],
        ["float.is_integer", float.is_integer],
        ["complex.real", complex.real],
    ],
)
def test_number_attributes(code, expected):
    # Numeric classes are allow-listed for attribute access (via `type`).
    assert guarded_eval(code, limited()) == expected
249
250
def test_method_descriptor():
    # The C-level method descriptor type is allow-listed for attribute access.
    context = limited()
    assert guarded_eval("list.copy.__name__", context) == "copy"
254
255
@pytest.mark.parametrize(
    "data,good,bad,expected",
    [
        [[1, 2, 3], "data.index(2)", "data.append(4)", 1],
        [{"a": 1}, "data.keys().isdisjoint({})", "data.update()", True],
    ],
)
def test_evaluates_calls(data, good, bad, expected):
    # Non-mutating methods may be called; mutating ones are rejected.
    context = limited(data=data)
    assert guarded_eval(good, context) == expected

    with pytest.raises(GuardRejection):
        guarded_eval(bad, context)
269
270
@pytest.mark.parametrize(
    "code,expected",
    [
        ["(1\n+\n1)", 2],
        ["list(range(10))[-1:]", [9]],
        ["list(range(20))[3:-2:3]", [3, 6, 9, 12, 15]],
    ],
)
@pytest.mark.parametrize("context", LIMITED_OR_HIGHER)
def test_evaluates_complex_cases(code, expected, context):
    # Multi-line expressions, slicing and chained calls in permissive modes.
    assert guarded_eval(code, context()) == expected
282
283
@pytest.mark.parametrize(
    "code,expected",
    [
        ["1", 1],
        ["1.0", 1.0],
        ["0xdeedbeef", 0xDEEDBEEF],
        ["True", True],
        ["None", None],
        ["{}", {}],
        ["[]", []],
    ],
)
@pytest.mark.parametrize("context", MINIMAL_OR_HIGHER)
def test_evaluates_literals(code, expected, context):
    # Bare literals evaluate even under the `minimal` policy.
    assert guarded_eval(code, context()) == expected
299
300
@pytest.mark.parametrize(
    "code,expected",
    [
        ["-5", -5],
        ["+5", +5],
        ["~5", -6],
    ],
)
@pytest.mark.parametrize("context", LIMITED_OR_HIGHER)
def test_evaluates_unary_operations(code, expected, context):
    # Unary operators on allow-listed ints evaluate in `limited` and above.
    assert guarded_eval(code, context()) == expected
312
313
@pytest.mark.parametrize(
    "code,expected",
    [
        ["1 + 1", 2],
        ["3 - 1", 2],
        ["2 * 3", 6],
        ["5 // 2", 2],
        ["5 / 2", 2.5],
        ["5**2", 25],
        ["2 >> 1", 1],
        ["2 << 1", 4],
        ["1 | 2", 3],
        ["1 & 1", 1],
        ["1 & 2", 0],
    ],
)
@pytest.mark.parametrize("context", LIMITED_OR_HIGHER)
def test_evaluates_binary_operations(code, expected, context):
    # Binary operators on allow-listed ints evaluate in `limited` and above.
    assert guarded_eval(code, context()) == expected
333
334
@pytest.mark.parametrize(
    "code,expected",
    [
        ["2 > 1", True],
        ["2 < 1", False],
        ["2 <= 1", False],
        ["2 <= 2", True],
        ["1 >= 2", False],
        ["2 >= 2", True],
        ["2 == 2", True],
        ["1 == 2", False],
        ["1 != 2", True],
        ["1 != 1", False],
        ["1 < 4 < 3", False],
        ["(1 < 4) < 3", True],
        ["4 > 3 > 2 > 1", True],
        ["4 > 3 > 2 > 9", False],
        ["1 < 2 < 3 < 4", True],
        ["9 < 2 < 3 < 4", False],
        ["1 < 2 > 1 > 0 > -1 < 1", True],
        ["1 in [1] in [[1]]", True],
        ["1 in [1] in [[2]]", False],
        ["1 in [1]", True],
        ["0 in [1]", False],
        ["1 not in [1]", False],
        ["0 not in [1]", True],
        ["True is True", True],
        ["False is False", True],
        ["True is False", False],
        ["True is not True", False],
        ["False is not True", True],
    ],
)
@pytest.mark.parametrize("context", LIMITED_OR_HIGHER)
def test_evaluates_comparisons(code, expected, context):
    # Comparison operators, including chained and membership/identity forms.
    assert guarded_eval(code, context()) == expected
371
372
def test_guards_comparisons():
    """Comparisons that would call an overridden ``__eq__`` are rejected."""

    class GoodEq(int):
        pass

    class BadEq(int):
        def __eq__(self, other):
            assert False

    context = limited(bad=BadEq(1), good=GoodEq(1))

    # Rejection must happen whichever side the guarded object appears on.
    for rejected in ("bad == 1", "bad != 1", "1 == bad", "1 != bad"):
        with pytest.raises(GuardRejection):
            guarded_eval(rejected, context)

    assert guarded_eval("good == 1", context) is True
    assert guarded_eval("good != 1", context) is False
    assert guarded_eval("1 == good", context) is True
    assert guarded_eval("1 != good", context) is False
399
400
def test_guards_unary_operations():
    """Unary inversion on objects overriding the inversion dunder is rejected.

    NOTE(review): both classes define ``__inv__`` (and with an ``other``
    parameter, although unary dunders take none), while Python 3's ``~``
    operator only invokes ``__invert__``. Presumably the guard's dunder
    table also inspects the legacy ``__inv__`` name — confirm against
    the unary-operator dunder mapping in ``guarded_eval`` before renaming
    either method, and note that ``GoodOp`` is never exercised here.
    """

    class GoodOp(int):
        pass

    class BadOpInv(int):
        def __inv__(self, other):
            assert False

    class BadOpInverse(int):
        def __inv__(self, other):
            assert False

    context = limited(good=GoodOp(1), bad1=BadOpInv(1), bad2=BadOpInverse(1))

    with pytest.raises(GuardRejection):
        guarded_eval("~bad1", context)

    with pytest.raises(GuardRejection):
        guarded_eval("~bad2", context)
420
421
def test_guards_binary_operations():
    """Addition involving an overridden ``__add__`` must be rejected."""

    class GoodOp(int):
        pass

    class BadOp(int):
        def __add__(self, other):
            assert False

    context = limited(good=GoodOp(1), bad=BadOp(1))

    # Both operand positions of the bad object trigger rejection.
    for rejected in ("1 + bad", "bad + 1"):
        with pytest.raises(GuardRejection):
            guarded_eval(rejected, context)

    assert guarded_eval("good + 1", context) == 2
    assert guarded_eval("1 + good", context) == 2
440
441
def test_guards_attributes():
    """Attribute access through custom attribute hooks must be rejected."""

    class GoodAttr(float):
        pass

    class BadAttr1(float):
        def __getattr__(self, key):
            assert False

    class BadAttr2(float):
        def __getattribute__(self, key):
            assert False

    context = limited(good=GoodAttr(0.5), bad1=BadAttr1(0.5), bad2=BadAttr2(0.5))

    # Overriding either __getattr__ or __getattribute__ triggers rejection.
    for rejected in ("bad1.as_integer_ratio", "bad2.as_integer_ratio"):
        with pytest.raises(GuardRejection):
            guarded_eval(rejected, context)

    assert guarded_eval("good.as_integer_ratio()", context) == (1, 2)
463
464
@pytest.mark.parametrize("context", MINIMAL_OR_HIGHER)
def test_access_builtins(context):
    """Built-in names resolve under every policy from minimal up."""
    ctx = context()
    assert guarded_eval("round", ctx) == round
468
469
def test_access_builtins_fails():
    """Unknown names raise ``NameError`` instead of silently resolving."""
    context = limited()
    with pytest.raises(NameError):
        guarded_eval("this_is_not_builtin", context)
474
475
def test_rejects_forbidden():
    """The ``forbidden`` policy rejects even a bare literal."""
    with pytest.raises(GuardRejection):
        guarded_eval("1", forbidden())
480
481
def test_guards_locals_and_globals():
    """The minimal policy must not expose user locals or globals."""
    context = EvaluationContext(
        locals={"local_a": "a"}, globals={"global_b": "b"}, evaluation="minimal"
    )

    for name in ("local_a", "global_b"):
        with pytest.raises(GuardRejection):
            guarded_eval(name, context)
492
493
def test_access_locals_and_globals():
    """The limited policy resolves names from both namespaces."""
    context = EvaluationContext(
        locals={"local_a": "a"}, globals={"global_b": "b"}, evaluation="limited"
    )
    expected = {"local_a": "a", "global_b": "b"}
    for name, value in expected.items():
        assert guarded_eval(name, context) == value
500
501
@pytest.mark.parametrize(
    "code",
    [
        "def func(): pass",
        "class C: pass",
        "x = 1",
        "x += 1",
        "del x",
        "import ast",
    ],
)
@pytest.mark.parametrize("context", [minimal(), limited(), unsafe()])
def test_rejects_side_effect_syntax(code, context):
    """Statements with side effects raise ``SyntaxError`` under every policy."""
    with pytest.raises(SyntaxError):
        guarded_eval(code, context)
510
511
def test_subscript():
    """In-subscript mode parses bare slices and implicit tuples."""
    context = EvaluationContext(
        locals={}, globals={}, evaluation="limited", in_subscript=True
    )
    full_slice = slice(None, None, None)
    assert guarded_eval("", context) == ()
    assert guarded_eval(":", context) == full_slice
    assert guarded_eval("1:2:3", context) == slice(1, 2, 3)
    assert guarded_eval(':, "a"', context) == (full_slice, "a")
521
522
def test_unbind_method():
    """``_unbind_method`` maps a bound method back to its class function."""

    class Overriding(list):
        def index(self, k):
            return "CUSTOM"

    instance = Overriding()
    assert _unbind_method(instance.index) is Overriding.index
    assert _unbind_method([].index) is list.index
    # An already-unbound function has no owner and yields None.
    assert _unbind_method(list.index) is None
532
533
def test_assumption_instance_attr_do_not_matter():
    """Special methods are looked up on the type, not on the instance.

    This is semi-specified in the Python documentation: assigning a dunder
    on an instance is 'not guaranteed to work' rather than 'forbidden to
    work', so future versions could invalidate this assumption. This test
    is meant to catch such a change if it ever comes true.
    """

    class T:
        def __getitem__(self, k):
            return "a"

        def __getattr__(self, k):
            return "a"

    def f(self):
        return "b"

    t = T()
    # Instance-level dunder assignments must be ignored by the interpreter.
    t.__getitem__ = f
    t.__getattr__ = f
    assert t[1] == "a"
    # Fixed: the second assertion previously duplicated the subscript check,
    # leaving the __getattr__ half of the assumption untested.
    assert t.some_attr == "a"
558
559
def test_assumption_named_tuples_share_getitem():
    """Check assumption on named tuples sharing __getitem__."""
    from typing import NamedTuple

    class First(NamedTuple):
        pass

    class Second(NamedTuple):
        pass

    # Distinct NamedTuple subclasses are expected to expose the same
    # __getitem__ implementation.
    assert First.__getitem__ == Second.__getitem__
@@ -0,0 +1,26 b''
1 from typing import List
2
3 import pytest
4 import pygments.lexers
5 import pygments.lexer
6
7 from IPython.lib.lexers import IPythonConsoleLexer, IPythonLexer, IPython3Lexer
8
9 #: the human-readable names of the IPython lexers with ``entry_points``
10 EXPECTED_LEXER_NAMES = [
11 cls.name for cls in [IPythonConsoleLexer, IPythonLexer, IPython3Lexer]
12 ]
13
14
@pytest.fixture
def all_pygments_lexer_names() -> List[str]:
    """Get all lexer names registered in pygments.

    Fixed: the body previously returned a ``set`` despite the ``List[str]``
    annotation; de-duplicate via a set but return a sorted list so the
    annotation is truthful. Also avoids shadowing the ``l``/builtin-lookalike
    loop name.
    """
    return sorted({lexer[0] for lexer in pygments.lexers.get_all_lexers()})
19
20
@pytest.mark.parametrize("expected_lexer", EXPECTED_LEXER_NAMES)
def test_pygments_entry_points(
    expected_lexer: str, all_pygments_lexer_names: List[str]
) -> None:
    """Check whether the ``entry_points`` for ``pygments.lexers`` are correct."""
    # ``all_pygments_lexer_names`` is the fixture collecting every lexer
    # name pygments discovered at runtime (entry points included).
    assert expected_lexer in all_pygments_lexer_names
@@ -31,6 +31,8 b' jobs:'
31 run: |
31 run: |
32 mypy -p IPython.terminal
32 mypy -p IPython.terminal
33 mypy -p IPython.core.magics
33 mypy -p IPython.core.magics
34 mypy -p IPython.core.guarded_eval
35 mypy -p IPython.core.completer
34 - name: Lint with pyflakes
36 - name: Lint with pyflakes
35 run: |
37 run: |
36 flake8 IPython/core/magics/script.py
38 flake8 IPython/core/magics/script.py
@@ -1,3 +1,4 b''
1 # PYTHON_ARGCOMPLETE_OK
1 """
2 """
2 IPython: tools for interactive and parallel computing in Python.
3 IPython: tools for interactive and parallel computing in Python.
3
4
@@ -1,3 +1,4 b''
1 # PYTHON_ARGCOMPLETE_OK
1 # encoding: utf-8
2 # encoding: utf-8
2 """Terminal-based IPython entry point.
3 """Terminal-based IPython entry point.
3 """
4 """
This diff has been collapsed as it changes many lines, (692 lines changed) Show them Hide them
@@ -50,7 +50,7 b' Backward latex completion'
50
50
51 It is sometime challenging to know how to type a character, if you are using
51 It is sometime challenging to know how to type a character, if you are using
52 IPython, or any compatible frontend you can prepend backslash to the character
52 IPython, or any compatible frontend you can prepend backslash to the character
53 and press ``<tab>`` to expand it to its latex form.
53 and press :kbd:`Tab` to expand it to its latex form.
54
54
55 .. code::
55 .. code::
56
56
@@ -59,7 +59,7 b' and press ``<tab>`` to expand it to its latex form.'
59
59
60
60
61 Both forward and backward completions can be deactivated by setting the
61 Both forward and backward completions can be deactivated by setting the
62 ``Completer.backslash_combining_completions`` option to ``False``.
62 :any:`Completer.backslash_combining_completions` option to ``False``.
63
63
64
64
65 Experimental
65 Experimental
@@ -95,7 +95,7 b' having to execute any code:'
95 ... myvar[1].bi<tab>
95 ... myvar[1].bi<tab>
96
96
97 Tab completion will be able to infer that ``myvar[1]`` is a real number without
97 Tab completion will be able to infer that ``myvar[1]`` is a real number without
98 executing any code unlike the previously available ``IPCompleter.greedy``
98 executing almost any code unlike the deprecated :any:`IPCompleter.greedy`
99 option.
99 option.
100
100
101 Be sure to update :any:`jedi` to the latest stable version or to try the
101 Be sure to update :any:`jedi` to the latest stable version or to try the
@@ -178,6 +178,7 b' The suppression behaviour can is user-configurable via'
178
178
179 from __future__ import annotations
179 from __future__ import annotations
180 import builtins as builtin_mod
180 import builtins as builtin_mod
181 import enum
181 import glob
182 import glob
182 import inspect
183 import inspect
183 import itertools
184 import itertools
@@ -186,14 +187,16 b' import os'
186 import re
187 import re
187 import string
188 import string
188 import sys
189 import sys
190 import tokenize
189 import time
191 import time
190 import unicodedata
192 import unicodedata
191 import uuid
193 import uuid
192 import warnings
194 import warnings
195 from ast import literal_eval
196 from collections import defaultdict
193 from contextlib import contextmanager
197 from contextlib import contextmanager
194 from dataclasses import dataclass
198 from dataclasses import dataclass
195 from functools import cached_property, partial
199 from functools import cached_property, partial
196 from importlib import import_module
197 from types import SimpleNamespace
200 from types import SimpleNamespace
198 from typing import (
201 from typing import (
199 Iterable,
202 Iterable,
@@ -204,14 +207,15 b' from typing import ('
204 Any,
207 Any,
205 Sequence,
208 Sequence,
206 Dict,
209 Dict,
207 NamedTuple,
208 Pattern,
209 Optional,
210 Optional,
210 TYPE_CHECKING,
211 TYPE_CHECKING,
211 Set,
212 Set,
213 Sized,
214 TypeVar,
212 Literal,
215 Literal,
213 )
216 )
214
217
218 from IPython.core.guarded_eval import guarded_eval, EvaluationContext
215 from IPython.core.error import TryNext
219 from IPython.core.error import TryNext
216 from IPython.core.inputtransformer2 import ESC_MAGIC
220 from IPython.core.inputtransformer2 import ESC_MAGIC
217 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
221 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
@@ -231,7 +235,6 b' from traitlets import ('
231 Unicode,
235 Unicode,
232 Dict as DictTrait,
236 Dict as DictTrait,
233 Union as UnionTrait,
237 Union as UnionTrait,
234 default,
235 observe,
238 observe,
236 )
239 )
237 from traitlets.config.configurable import Configurable
240 from traitlets.config.configurable import Configurable
@@ -254,10 +257,11 b' except ImportError:'
254
257
255 if TYPE_CHECKING or GENERATING_DOCUMENTATION:
258 if TYPE_CHECKING or GENERATING_DOCUMENTATION:
256 from typing import cast
259 from typing import cast
257 from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias
260 from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias, TypeGuard
258 else:
261 else:
262 from typing import Generic
259
263
260 def cast(obj, type_):
264 def cast(type_, obj):
261 """Workaround for `TypeError: MatcherAPIv2() takes no arguments`"""
265 """Workaround for `TypeError: MatcherAPIv2() takes no arguments`"""
262 return obj
266 return obj
263
267
@@ -266,6 +270,7 b' else:'
266 TypedDict = Dict # by extension of `NotRequired` requires 3.11 too
270 TypedDict = Dict # by extension of `NotRequired` requires 3.11 too
267 Protocol = object # requires Python >=3.8
271 Protocol = object # requires Python >=3.8
268 TypeAlias = Any # requires Python >=3.10
272 TypeAlias = Any # requires Python >=3.10
273 TypeGuard = Generic # requires Python >=3.10
269 if GENERATING_DOCUMENTATION:
274 if GENERATING_DOCUMENTATION:
270 from typing import TypedDict
275 from typing import TypedDict
271
276
@@ -296,6 +301,9 b' MATCHES_LIMIT = 500'
296 # Completion type reported when no type can be inferred.
301 # Completion type reported when no type can be inferred.
297 _UNKNOWN_TYPE = "<unknown>"
302 _UNKNOWN_TYPE = "<unknown>"
298
303
304 # sentinel value to signal lack of a match
305 not_found = object()
306
299 class ProvisionalCompleterWarning(FutureWarning):
307 class ProvisionalCompleterWarning(FutureWarning):
300 """
308 """
301 Exception raise by an experimental feature in this module.
309 Exception raise by an experimental feature in this module.
@@ -466,8 +474,9 b' class _FakeJediCompletion:'
466 self.complete = name
474 self.complete = name
467 self.type = 'crashed'
475 self.type = 'crashed'
468 self.name_with_symbols = name
476 self.name_with_symbols = name
469 self.signature = ''
477 self.signature = ""
470 self._origin = 'fake'
478 self._origin = "fake"
479 self.text = "crashed"
471
480
472 def __repr__(self):
481 def __repr__(self):
473 return '<Fake completion object jedi has crashed>'
482 return '<Fake completion object jedi has crashed>'
@@ -503,11 +512,23 b' class Completion:'
503
512
504 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
513 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
505
514
506 def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin='', signature='') -> None:
515 def __init__(
507 warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
516 self,
517 start: int,
518 end: int,
519 text: str,
520 *,
521 type: Optional[str] = None,
522 _origin="",
523 signature="",
524 ) -> None:
525 warnings.warn(
526 "``Completion`` is a provisional API (as of IPython 6.0). "
508 "It may change without warnings. "
527 "It may change without warnings. "
509 "Use in corresponding context manager.",
528 "Use in corresponding context manager.",
510 category=ProvisionalCompleterWarning, stacklevel=2)
529 category=ProvisionalCompleterWarning,
530 stacklevel=2,
531 )
511
532
512 self.start = start
533 self.start = start
513 self.end = end
534 self.end = end
@@ -520,7 +541,7 b' class Completion:'
520 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
541 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
521 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
542 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
522
543
523 def __eq__(self, other)->Bool:
544 def __eq__(self, other) -> bool:
524 """
545 """
525 Equality and hash do not hash the type (as some completer may not be
546 Equality and hash do not hash the type (as some completer may not be
526 able to infer the type), but are use to (partially) de-duplicate
547 able to infer the type), but are use to (partially) de-duplicate
@@ -554,7 +575,7 b' class SimpleCompletion:'
554
575
555 __slots__ = ["text", "type"]
576 __slots__ = ["text", "type"]
556
577
557 def __init__(self, text: str, *, type: str = None):
578 def __init__(self, text: str, *, type: Optional[str] = None):
558 self.text = text
579 self.text = text
559 self.type = type
580 self.type = type
560
581
@@ -588,14 +609,18 b' class SimpleMatcherResult(_MatcherResultBase, TypedDict):'
588 # in order to get __orig_bases__ for documentation
609 # in order to get __orig_bases__ for documentation
589
610
590 #: List of candidate completions
611 #: List of candidate completions
591 completions: Sequence[SimpleCompletion]
612 completions: Sequence[SimpleCompletion] | Iterator[SimpleCompletion]
592
613
593
614
594 class _JediMatcherResult(_MatcherResultBase):
615 class _JediMatcherResult(_MatcherResultBase):
595 """Matching result returned by Jedi (will be processed differently)"""
616 """Matching result returned by Jedi (will be processed differently)"""
596
617
597 #: list of candidate completions
618 #: list of candidate completions
598 completions: Iterable[_JediCompletionLike]
619 completions: Iterator[_JediCompletionLike]
620
621
622 AnyMatcherCompletion = Union[_JediCompletionLike, SimpleCompletion]
623 AnyCompletion = TypeVar("AnyCompletion", AnyMatcherCompletion, Completion)
599
624
600
625
601 @dataclass
626 @dataclass
@@ -642,16 +667,21 b' MatcherResult = Union[SimpleMatcherResult, _JediMatcherResult]'
642
667
643
668
644 class _MatcherAPIv1Base(Protocol):
669 class _MatcherAPIv1Base(Protocol):
645 def __call__(self, text: str) -> list[str]:
670 def __call__(self, text: str) -> List[str]:
646 """Call signature."""
671 """Call signature."""
672 ...
673
674 #: Used to construct the default matcher identifier
675 __qualname__: str
647
676
648
677
649 class _MatcherAPIv1Total(_MatcherAPIv1Base, Protocol):
678 class _MatcherAPIv1Total(_MatcherAPIv1Base, Protocol):
650 #: API version
679 #: API version
651 matcher_api_version: Optional[Literal[1]]
680 matcher_api_version: Optional[Literal[1]]
652
681
653 def __call__(self, text: str) -> list[str]:
682 def __call__(self, text: str) -> List[str]:
654 """Call signature."""
683 """Call signature."""
684 ...
655
685
656
686
657 #: Protocol describing Matcher API v1.
687 #: Protocol describing Matcher API v1.
@@ -666,26 +696,61 b' class MatcherAPIv2(Protocol):'
666
696
667 def __call__(self, context: CompletionContext) -> MatcherResult:
697 def __call__(self, context: CompletionContext) -> MatcherResult:
668 """Call signature."""
698 """Call signature."""
699 ...
700
701 #: Used to construct the default matcher identifier
702 __qualname__: str
669
703
670
704
671 Matcher: TypeAlias = Union[MatcherAPIv1, MatcherAPIv2]
705 Matcher: TypeAlias = Union[MatcherAPIv1, MatcherAPIv2]
672
706
673
707
708 def _is_matcher_v1(matcher: Matcher) -> TypeGuard[MatcherAPIv1]:
709 api_version = _get_matcher_api_version(matcher)
710 return api_version == 1
711
712
713 def _is_matcher_v2(matcher: Matcher) -> TypeGuard[MatcherAPIv2]:
714 api_version = _get_matcher_api_version(matcher)
715 return api_version == 2
716
717
718 def _is_sizable(value: Any) -> TypeGuard[Sized]:
719 """Determines whether the object is sizable"""
720 return hasattr(value, "__len__")
721
722
723 def _is_iterator(value: Any) -> TypeGuard[Iterator]:
724 """Determines whether the object is an iterator"""
725 return hasattr(value, "__next__")
726
727
674 def has_any_completions(result: MatcherResult) -> bool:
728 def has_any_completions(result: MatcherResult) -> bool:
675 """Check if any result includes any completions."""
729 """Check if any result includes any completions."""
676 if hasattr(result["completions"], "__len__"):
730 completions = result["completions"]
677 return len(result["completions"]) != 0
731 if _is_sizable(completions):
732 return len(completions) != 0
733 if _is_iterator(completions):
678 try:
734 try:
679 old_iterator = result["completions"]
735 old_iterator = completions
680 first = next(old_iterator)
736 first = next(old_iterator)
681 result["completions"] = itertools.chain([first], old_iterator)
737 result["completions"] = cast(
738 Iterator[SimpleCompletion],
739 itertools.chain([first], old_iterator),
740 )
682 return True
741 return True
683 except StopIteration:
742 except StopIteration:
684 return False
743 return False
744 raise ValueError(
745 "Completions returned by matcher need to be an Iterator or a Sizable"
746 )
685
747
686
748
687 def completion_matcher(
749 def completion_matcher(
688 *, priority: float = None, identifier: str = None, api_version: int = 1
750 *,
751 priority: Optional[float] = None,
752 identifier: Optional[str] = None,
753 api_version: int = 1,
689 ):
754 ):
690 """Adds attributes describing the matcher.
755 """Adds attributes describing the matcher.
691
756
@@ -708,14 +773,14 b' def completion_matcher('
708 """
773 """
709
774
710 def wrapper(func: Matcher):
775 def wrapper(func: Matcher):
711 func.matcher_priority = priority or 0
776 func.matcher_priority = priority or 0 # type: ignore
712 func.matcher_identifier = identifier or func.__qualname__
777 func.matcher_identifier = identifier or func.__qualname__ # type: ignore
713 func.matcher_api_version = api_version
778 func.matcher_api_version = api_version # type: ignore
714 if TYPE_CHECKING:
779 if TYPE_CHECKING:
715 if api_version == 1:
780 if api_version == 1:
716 func = cast(func, MatcherAPIv1)
781 func = cast(MatcherAPIv1, func)
717 elif api_version == 2:
782 elif api_version == 2:
718 func = cast(func, MatcherAPIv2)
783 func = cast(MatcherAPIv2, func)
719 return func
784 return func
720
785
721 return wrapper
786 return wrapper
@@ -902,12 +967,44 b' class CompletionSplitter(object):'
902
967
903 class Completer(Configurable):
968 class Completer(Configurable):
904
969
905 greedy = Bool(False,
970 greedy = Bool(
906 help="""Activate greedy completion
971 False,
907 PENDING DEPRECATION. this is now mostly taken care of with Jedi.
972 help="""Activate greedy completion.
973
974 .. deprecated:: 8.8
975 Use :any:`Completer.evaluation` and :any:`Completer.auto_close_dict_keys` instead.
908
976
909 This will enable completion on elements of lists, results of function calls, etc.,
977 When enabled in IPython 8.8 or newer, changes configuration as follows:
910 but can be unsafe because the code is actually evaluated on TAB.
978
979 - ``Completer.evaluation = 'unsafe'``
980 - ``Completer.auto_close_dict_keys = True``
981 """,
982 ).tag(config=True)
983
984 evaluation = Enum(
985 ("forbidden", "minimal", "limited", "unsafe", "dangerous"),
986 default_value="limited",
987 help="""Policy for code evaluation under completion.
988
989 Successive options allow to enable more eager evaluation for better
990 completion suggestions, including for nested dictionaries, nested lists,
991 or even results of function calls.
992 Setting ``unsafe`` or higher can lead to evaluation of arbitrary user
993 code on :kbd:`Tab` with potentially unwanted or dangerous side effects.
994
995 Allowed values are:
996
997 - ``forbidden``: no evaluation of code is permitted,
998 - ``minimal``: evaluation of literals and access to built-in namespace;
999 no item/attribute evaluation, no access to locals/globals,
1000 no evaluation of any operations or comparisons.
1001 - ``limited``: access to all namespaces, evaluation of hard-coded methods
1002 (for example: :any:`dict.keys`, :any:`object.__getattr__`,
1003 :any:`object.__getitem__`) on allow-listed objects (for example:
1004 :any:`dict`, :any:`list`, :any:`tuple`, ``pandas.Series``),
1005 - ``unsafe``: evaluation of all methods and function calls but not of
1006 syntax with side-effects like `del x`,
1007 - ``dangerous``: completely arbitrary evaluation.
911 """,
1008 """,
912 ).tag(config=True)
1009 ).tag(config=True)
913
1010
@@ -931,6 +1028,18 b' class Completer(Configurable):'
931 "Includes completion of latex commands, unicode names, and expanding "
1028 "Includes completion of latex commands, unicode names, and expanding "
932 "unicode characters back to latex commands.").tag(config=True)
1029 "unicode characters back to latex commands.").tag(config=True)
933
1030
1031 auto_close_dict_keys = Bool(
1032 False,
1033 help="""
1034 Enable auto-closing dictionary keys.
1035
1036 When enabled string keys will be suffixed with a final quote
1037 (matching the opening quote), tuple keys will also receive a
1038 separating comma if needed, and keys which are final will
1039 receive a closing bracket (``]``).
1040 """,
1041 ).tag(config=True)
1042
934 def __init__(self, namespace=None, global_namespace=None, **kwargs):
1043 def __init__(self, namespace=None, global_namespace=None, **kwargs):
935 """Create a new completer for the command line.
1044 """Create a new completer for the command line.
936
1045
@@ -1029,26 +1138,14 b' class Completer(Configurable):'
1029 with a __getattr__ hook is evaluated.
1138 with a __getattr__ hook is evaluated.
1030
1139
1031 """
1140 """
1032
1033 # Another option, seems to work great. Catches things like ''.<tab>
1034 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
1035
1036 if m:
1037 expr, attr = m.group(1, 3)
1038 elif self.greedy:
1039 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
1141 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
1040 if not m2:
1142 if not m2:
1041 return []
1143 return []
1042 expr, attr = m2.group(1,2)
1144 expr, attr = m2.group(1, 2)
1043 else:
1044 return []
1045
1145
1046 try:
1146 obj = self._evaluate_expr(expr)
1047 obj = eval(expr, self.namespace)
1147
1048 except:
1148 if obj is not_found:
1049 try:
1050 obj = eval(expr, self.global_namespace)
1051 except:
1052 return []
1149 return []
1053
1150
1054 if self.limit_to__all__ and hasattr(obj, '__all__'):
1151 if self.limit_to__all__ and hasattr(obj, '__all__'):
@@ -1068,8 +1165,31 b' class Completer(Configurable):'
1068 pass
1165 pass
1069 # Build match list to return
1166 # Build match list to return
1070 n = len(attr)
1167 n = len(attr)
1071 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
1168 return ["%s.%s" % (expr, w) for w in words if w[:n] == attr]
1072
1169
1170 def _evaluate_expr(self, expr):
1171 obj = not_found
1172 done = False
1173 while not done and expr:
1174 try:
1175 obj = guarded_eval(
1176 expr,
1177 EvaluationContext(
1178 globals=self.global_namespace,
1179 locals=self.namespace,
1180 evaluation=self.evaluation,
1181 ),
1182 )
1183 done = True
1184 except Exception as e:
1185 if self.debug:
1186 print("Evaluation exception", e)
1187 # trim the expression to remove any invalid prefix
1188 # e.g. user starts `(d[`, so we get `expr = '(d'`,
1189 # where parenthesis is not closed.
1190 # TODO: make this faster by reusing parts of the computation?
1191 expr = expr[1:]
1192 return obj
1073
1193
1074 def get__all__entries(obj):
1194 def get__all__entries(obj):
1075 """returns the strings in the __all__ attribute"""
1195 """returns the strings in the __all__ attribute"""
@@ -1081,8 +1201,82 b' def get__all__entries(obj):'
1081 return [w for w in words if isinstance(w, str)]
1201 return [w for w in words if isinstance(w, str)]
1082
1202
1083
1203
1084 def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes]]]], prefix: str, delims: str,
1204 class _DictKeyState(enum.Flag):
1085 extra_prefix: Optional[Tuple[str, bytes]]=None) -> Tuple[str, int, List[str]]:
1205 """Represent state of the key match in context of other possible matches.
1206
1207 - given `d1 = {'a': 1}` completion on `d1['<tab>` will yield `{'a': END_OF_ITEM}` as there is no tuple.
1208 - given `d2 = {('a', 'b'): 1}`: `d2['a', '<tab>` will yield `{'b': END_OF_TUPLE}` as there is no tuple members to add beyond `'b'`.
1209 - given `d3 = {('a', 'b'): 1}`: `d3['<tab>` will yield `{'a': IN_TUPLE}` as `'a'` can be added.
1210 - given `d4 = {'a': 1, ('a', 'b'): 2}`: `d4['<tab>` will yield `{'a': END_OF_ITEM & END_OF_TUPLE}`
1211 """
1212
1213 BASELINE = 0
1214 END_OF_ITEM = enum.auto()
1215 END_OF_TUPLE = enum.auto()
1216 IN_TUPLE = enum.auto()
1217
1218
1219 def _parse_tokens(c):
1220 """Parse tokens even if there is an error."""
1221 tokens = []
1222 token_generator = tokenize.generate_tokens(iter(c.splitlines()).__next__)
1223 while True:
1224 try:
1225 tokens.append(next(token_generator))
1226 except tokenize.TokenError:
1227 return tokens
1228 except StopIteration:
1229 return tokens
1230
1231
1232 def _match_number_in_dict_key_prefix(prefix: str) -> Union[str, None]:
1233 """Match any valid Python numeric literal in a prefix of dictionary keys.
1234
1235 References:
1236 - https://docs.python.org/3/reference/lexical_analysis.html#numeric-literals
1237 - https://docs.python.org/3/library/tokenize.html
1238 """
1239 if prefix[-1].isspace():
1240 # if user typed a space we do not have anything to complete
1241 # even if there was a valid number token before
1242 return None
1243 tokens = _parse_tokens(prefix)
1244 rev_tokens = reversed(tokens)
1245 skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
1246 number = None
1247 for token in rev_tokens:
1248 if token.type in skip_over:
1249 continue
1250 if number is None:
1251 if token.type == tokenize.NUMBER:
1252 number = token.string
1253 continue
1254 else:
1255 # we did not match a number
1256 return None
1257 if token.type == tokenize.OP:
1258 if token.string == ",":
1259 break
1260 if token.string in {"+", "-"}:
1261 number = token.string + number
1262 else:
1263 return None
1264 return number
1265
1266
1267 _INT_FORMATS = {
1268 "0b": bin,
1269 "0o": oct,
1270 "0x": hex,
1271 }
1272
1273
1274 def match_dict_keys(
1275 keys: List[Union[str, bytes, Tuple[Union[str, bytes], ...]]],
1276 prefix: str,
1277 delims: str,
1278 extra_prefix: Optional[Tuple[Union[str, bytes], ...]] = None,
1279 ) -> Tuple[str, int, Dict[str, _DictKeyState]]:
1086 """Used by dict_key_matches, matching the prefix to a list of keys
1280 """Used by dict_key_matches, matching the prefix to a list of keys
1087
1281
1088 Parameters
1282 Parameters
@@ -1102,47 +1296,89 b' def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes]]]], pre'
1102 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
1296 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
1103 ``quote`` being the quote that need to be used to close current string.
1297 ``quote`` being the quote that need to be used to close current string.
1104 ``token_start`` the position where the replacement should start occurring,
1298 ``token_start`` the position where the replacement should start occurring,
1105 ``matches`` a list of replacement/completion
1299 ``matches`` a dictionary of replacement/completion keys on keys and values
1106
1300 indicating whether the state.
1107 """
1301 """
1108 prefix_tuple = extra_prefix if extra_prefix else ()
1302 prefix_tuple = extra_prefix if extra_prefix else ()
1109 Nprefix = len(prefix_tuple)
1303
1304 prefix_tuple_size = sum(
1305 [
1306 # for pandas, do not count slices as taking space
1307 not isinstance(k, slice)
1308 for k in prefix_tuple
1309 ]
1310 )
1311 text_serializable_types = (str, bytes, int, float, slice)
1312
1110 def filter_prefix_tuple(key):
1313 def filter_prefix_tuple(key):
1111 # Reject too short keys
1314 # Reject too short keys
1112 if len(key) <= Nprefix:
1315 if len(key) <= prefix_tuple_size:
1113 return False
1316 return False
1114 # Reject keys with non str/bytes in it
1317 # Reject keys which cannot be serialised to text
1115 for k in key:
1318 for k in key:
1116 if not isinstance(k, (str, bytes)):
1319 if not isinstance(k, text_serializable_types):
1117 return False
1320 return False
1118 # Reject keys that do not match the prefix
1321 # Reject keys that do not match the prefix
1119 for k, pt in zip(key, prefix_tuple):
1322 for k, pt in zip(key, prefix_tuple):
1120 if k != pt:
1323 if k != pt and not isinstance(pt, slice):
1121 return False
1324 return False
1122 # All checks passed!
1325 # All checks passed!
1123 return True
1326 return True
1124
1327
1125 filtered_keys:List[Union[str,bytes]] = []
1328 filtered_key_is_final: Dict[
1126 def _add_to_filtered_keys(key):
1329 Union[str, bytes, int, float], _DictKeyState
1127 if isinstance(key, (str, bytes)):
1330 ] = defaultdict(lambda: _DictKeyState.BASELINE)
1128 filtered_keys.append(key)
1129
1331
1130 for k in keys:
1332 for k in keys:
1333 # If at least one of the matches is not final, mark as undetermined.
1334 # This can happen with `d = {111: 'b', (111, 222): 'a'}` where
1335 # `111` appears final on first match but is not final on the second.
1336
1131 if isinstance(k, tuple):
1337 if isinstance(k, tuple):
1132 if filter_prefix_tuple(k):
1338 if filter_prefix_tuple(k):
1133 _add_to_filtered_keys(k[Nprefix])
1339 key_fragment = k[prefix_tuple_size]
1340 filtered_key_is_final[key_fragment] |= (
1341 _DictKeyState.END_OF_TUPLE
1342 if len(k) == prefix_tuple_size + 1
1343 else _DictKeyState.IN_TUPLE
1344 )
1345 elif prefix_tuple_size > 0:
1346 # we are completing a tuple but this key is not a tuple,
1347 # so we should ignore it
1348 pass
1134 else:
1349 else:
1135 _add_to_filtered_keys(k)
1350 if isinstance(k, text_serializable_types):
1351 filtered_key_is_final[k] |= _DictKeyState.END_OF_ITEM
1352
1353 filtered_keys = filtered_key_is_final.keys()
1136
1354
1137 if not prefix:
1355 if not prefix:
1138 return '', 0, [repr(k) for k in filtered_keys]
1356 return "", 0, {repr(k): v for k, v in filtered_key_is_final.items()}
1139 quote_match = re.search('["\']', prefix)
1357
1140 assert quote_match is not None # silence mypy
1358 quote_match = re.search("(?:\"|')", prefix)
1359 is_user_prefix_numeric = False
1360
1361 if quote_match:
1141 quote = quote_match.group()
1362 quote = quote_match.group()
1363 valid_prefix = prefix + quote
1142 try:
1364 try:
1143 prefix_str = eval(prefix + quote, {})
1365 prefix_str = literal_eval(valid_prefix)
1144 except Exception:
1366 except Exception:
1145 return '', 0, []
1367 return "", 0, {}
1368 else:
1369 # If it does not look like a string, let's assume
1370 # we are dealing with a number or variable.
1371 number_match = _match_number_in_dict_key_prefix(prefix)
1372
1373 # We do not want the key matcher to suggest variable names so we yield:
1374 if number_match is None:
1375 # The alternative would be to assume that the user forgot the quote
1376 # and if the substring matches, suggest adding it at the start.
1377 return "", 0, {}
1378
1379 prefix_str = number_match
1380 is_user_prefix_numeric = True
1381 quote = ""
1146
1382
1147 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
1383 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
1148 token_match = re.search(pattern, prefix, re.UNICODE)
1384 token_match = re.search(pattern, prefix, re.UNICODE)
@@ -1150,17 +1386,36 b' def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes]]]], pre'
1150 token_start = token_match.start()
1386 token_start = token_match.start()
1151 token_prefix = token_match.group()
1387 token_prefix = token_match.group()
1152
1388
1153 matched:List[str] = []
1389 matched: Dict[str, _DictKeyState] = {}
1390
1391 str_key: Union[str, bytes]
1392
1154 for key in filtered_keys:
1393 for key in filtered_keys:
1394 if isinstance(key, (int, float)):
1395 # User typed a number but this key is not a number.
1396 if not is_user_prefix_numeric:
1397 continue
1398 str_key = str(key)
1399 if isinstance(key, int):
1400 int_base = prefix_str[:2].lower()
1401 # if user typed integer using binary/oct/hex notation:
1402 if int_base in _INT_FORMATS:
1403 int_format = _INT_FORMATS[int_base]
1404 str_key = int_format(key)
1405 else:
1406 # User typed a string but this key is a number.
1407 if is_user_prefix_numeric:
1408 continue
1409 str_key = key
1155 try:
1410 try:
1156 if not key.startswith(prefix_str):
1411 if not str_key.startswith(prefix_str):
1157 continue
1412 continue
1158 except (AttributeError, TypeError, UnicodeError):
1413 except (AttributeError, TypeError, UnicodeError) as e:
1159 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
1414 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
1160 continue
1415 continue
1161
1416
1162 # reformat remainder of key to begin with prefix
1417 # reformat remainder of key to begin with prefix
1163 rem = key[len(prefix_str):]
1418 rem = str_key[len(prefix_str) :]
1164 # force repr wrapped in '
1419 # force repr wrapped in '
1165 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
1420 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
1166 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
1421 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
@@ -1171,7 +1426,9 b' def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes]]]], pre'
1171 rem_repr = rem_repr.replace('"', '\\"')
1426 rem_repr = rem_repr.replace('"', '\\"')
1172
1427
1173 # then reinsert prefix from start of token
1428 # then reinsert prefix from start of token
1174 matched.append('%s%s' % (token_prefix, rem_repr))
1429 match = "%s%s" % (token_prefix, rem_repr)
1430
1431 matched[match] = filtered_key_is_final[key]
1175 return quote, token_start, matched
1432 return quote, token_start, matched
1176
1433
1177
1434
@@ -1237,11 +1494,14 b' def position_to_cursor(text:str, offset:int)->Tuple[int, int]:'
1237 return line, col
1494 return line, col
1238
1495
1239
1496
1240 def _safe_isinstance(obj, module, class_name):
1497 def _safe_isinstance(obj, module, class_name, *attrs):
1241 """Checks if obj is an instance of module.class_name if loaded
1498 """Checks if obj is an instance of module.class_name if loaded
1242 """
1499 """
1243 return (module in sys.modules and
1500 if module in sys.modules:
1244 isinstance(obj, getattr(import_module(module), class_name)))
1501 m = sys.modules[module]
1502 for attr in [class_name, *attrs]:
1503 m = getattr(m, attr)
1504 return isinstance(obj, m)
1245
1505
1246
1506
1247 @context_matcher()
1507 @context_matcher()
@@ -1394,10 +1654,59 b' def _make_signature(completion)-> str:'
1394 _CompleteResult = Dict[str, MatcherResult]
1654 _CompleteResult = Dict[str, MatcherResult]
1395
1655
1396
1656
1657 DICT_MATCHER_REGEX = re.compile(
1658 r"""(?x)
1659 ( # match dict-referring - or any get item object - expression
1660 .+
1661 )
1662 \[ # open bracket
1663 \s* # and optional whitespace
1664 # Capture any number of serializable objects (e.g. "a", "b", 'c')
1665 # and slices
1666 ((?:(?:
1667 (?: # closed string
1668 [uUbB]? # string prefix (r not handled)
1669 (?:
1670 '(?:[^']|(?<!\\)\\')*'
1671 |
1672 "(?:[^"]|(?<!\\)\\")*"
1673 )
1674 )
1675 |
1676 # capture integers and slices
1677 (?:[-+]?\d+)?(?::(?:[-+]?\d+)?){0,2}
1678 |
1679 # integer in bin/hex/oct notation
1680 0[bBxXoO]_?(?:\w|\d)+
1681 )
1682 \s*,\s*
1683 )*)
1684 ((?:
1685 (?: # unclosed string
1686 [uUbB]? # string prefix (r not handled)
1687 (?:
1688 '(?:[^']|(?<!\\)\\')*
1689 |
1690 "(?:[^"]|(?<!\\)\\")*
1691 )
1692 )
1693 |
1694 # unfinished integer
1695 (?:[-+]?\d+)
1696 |
1697 # integer in bin/hex/oct notation
1698 0[bBxXoO]_?(?:\w|\d)+
1699 )
1700 )?
1701 $
1702 """
1703 )
1704
1705
1397 def _convert_matcher_v1_result_to_v2(
1706 def _convert_matcher_v1_result_to_v2(
1398 matches: Sequence[str],
1707 matches: Sequence[str],
1399 type: str,
1708 type: str,
1400 fragment: str = None,
1709 fragment: Optional[str] = None,
1401 suppress_if_matches: bool = False,
1710 suppress_if_matches: bool = False,
1402 ) -> SimpleMatcherResult:
1711 ) -> SimpleMatcherResult:
1403 """Utility to help with transition"""
1712 """Utility to help with transition"""
@@ -1407,20 +1716,22 b' def _convert_matcher_v1_result_to_v2('
1407 }
1716 }
1408 if fragment is not None:
1717 if fragment is not None:
1409 result["matched_fragment"] = fragment
1718 result["matched_fragment"] = fragment
1410 return result
1719 return cast(SimpleMatcherResult, result)
1411
1720
1412
1721
1413 class IPCompleter(Completer):
1722 class IPCompleter(Completer):
1414 """Extension of the completer class with IPython-specific features"""
1723 """Extension of the completer class with IPython-specific features"""
1415
1724
1416 __dict_key_regexps: Optional[Dict[bool,Pattern]] = None
1417
1418 @observe('greedy')
1725 @observe('greedy')
1419 def _greedy_changed(self, change):
1726 def _greedy_changed(self, change):
1420 """update the splitter and readline delims when greedy is changed"""
1727 """update the splitter and readline delims when greedy is changed"""
1421 if change['new']:
1728 if change["new"]:
1729 self.evaluation = "unsafe"
1730 self.auto_close_dict_keys = True
1422 self.splitter.delims = GREEDY_DELIMS
1731 self.splitter.delims = GREEDY_DELIMS
1423 else:
1732 else:
1733 self.evaluation = "limited"
1734 self.auto_close_dict_keys = False
1424 self.splitter.delims = DELIMS
1735 self.splitter.delims = DELIMS
1425
1736
1426 dict_keys_only = Bool(
1737 dict_keys_only = Bool(
@@ -1607,7 +1918,7 b' class IPCompleter(Completer):'
1607
1918
1608 if not self.backslash_combining_completions:
1919 if not self.backslash_combining_completions:
1609 for matcher in self._backslash_combining_matchers:
1920 for matcher in self._backslash_combining_matchers:
1610 self.disable_matchers.append(matcher.matcher_identifier)
1921 self.disable_matchers.append(_get_matcher_id(matcher))
1611
1922
1612 if not self.merge_completions:
1923 if not self.merge_completions:
1613 self.suppress_competing_matchers = True
1924 self.suppress_competing_matchers = True
@@ -1897,7 +2208,7 b' class IPCompleter(Completer):'
1897
2208
1898 def _jedi_matches(
2209 def _jedi_matches(
1899 self, cursor_column: int, cursor_line: int, text: str
2210 self, cursor_column: int, cursor_line: int, text: str
1900 ) -> Iterable[_JediCompletionLike]:
2211 ) -> Iterator[_JediCompletionLike]:
1901 """
2212 """
1902 Return a list of :any:`jedi.api.Completion`s object from a ``text`` and
2213 Return a list of :any:`jedi.api.Completion`s object from a ``text`` and
1903 cursor position.
2214 cursor position.
@@ -1963,15 +2274,23 b' class IPCompleter(Completer):'
1963 print("Error detecting if completing a non-finished string :", e, '|')
2274 print("Error detecting if completing a non-finished string :", e, '|')
1964
2275
1965 if not try_jedi:
2276 if not try_jedi:
1966 return []
2277 return iter([])
1967 try:
2278 try:
1968 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
2279 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
1969 except Exception as e:
2280 except Exception as e:
1970 if self.debug:
2281 if self.debug:
1971 return [_FakeJediCompletion('Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\ns"""' % (e))]
2282 return iter(
2283 [
2284 _FakeJediCompletion(
2285 'Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\ns"""'
2286 % (e)
2287 )
2288 ]
2289 )
1972 else:
2290 else:
1973 return []
2291 return iter([])
1974
2292
2293 @completion_matcher(api_version=1)
1975 def python_matches(self, text: str) -> Iterable[str]:
2294 def python_matches(self, text: str) -> Iterable[str]:
1976 """Match attributes or global python names"""
2295 """Match attributes or global python names"""
1977 if "." in text:
2296 if "." in text:
@@ -2149,12 +2468,16 b' class IPCompleter(Completer):'
2149 return method()
2468 return method()
2150
2469
2151 # Special case some common in-memory dict-like types
2470 # Special case some common in-memory dict-like types
2152 if isinstance(obj, dict) or\
2471 if isinstance(obj, dict) or _safe_isinstance(obj, "pandas", "DataFrame"):
2153 _safe_isinstance(obj, 'pandas', 'DataFrame'):
2154 try:
2472 try:
2155 return list(obj.keys())
2473 return list(obj.keys())
2156 except Exception:
2474 except Exception:
2157 return []
2475 return []
2476 elif _safe_isinstance(obj, "pandas", "core", "indexing", "_LocIndexer"):
2477 try:
2478 return list(obj.obj.keys())
2479 except Exception:
2480 return []
2158 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
2481 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
2159 _safe_isinstance(obj, 'numpy', 'void'):
2482 _safe_isinstance(obj, 'numpy', 'void'):
2160 return obj.dtype.names or []
2483 return obj.dtype.names or []
@@ -2175,74 +2498,49 b' class IPCompleter(Completer):'
2175 You can use :meth:`dict_key_matcher` instead.
2498 You can use :meth:`dict_key_matcher` instead.
2176 """
2499 """
2177
2500
2178 if self.__dict_key_regexps is not None:
2501 # Short-circuit on closed dictionary (regular expression would
2179 regexps = self.__dict_key_regexps
2502 # not match anyway, but would take quite a while).
2180 else:
2503 if self.text_until_cursor.strip().endswith("]"):
2181 dict_key_re_fmt = r'''(?x)
2504 return []
2182 ( # match dict-referring expression wrt greedy setting
2183 %s
2184 )
2185 \[ # open bracket
2186 \s* # and optional whitespace
2187 # Capture any number of str-like objects (e.g. "a", "b", 'c')
2188 ((?:[uUbB]? # string prefix (r not handled)
2189 (?:
2190 '(?:[^']|(?<!\\)\\')*'
2191 |
2192 "(?:[^"]|(?<!\\)\\")*"
2193 )
2194 \s*,\s*
2195 )*)
2196 ([uUbB]? # string prefix (r not handled)
2197 (?: # unclosed string
2198 '(?:[^']|(?<!\\)\\')*
2199 |
2200 "(?:[^"]|(?<!\\)\\")*
2201 )
2202 )?
2203 $
2204 '''
2205 regexps = self.__dict_key_regexps = {
2206 False: re.compile(dict_key_re_fmt % r'''
2207 # identifiers separated by .
2208 (?!\d)\w+
2209 (?:\.(?!\d)\w+)*
2210 '''),
2211 True: re.compile(dict_key_re_fmt % '''
2212 .+
2213 ''')
2214 }
2215
2505
2216 match = regexps[self.greedy].search(self.text_until_cursor)
2506 match = DICT_MATCHER_REGEX.search(self.text_until_cursor)
2217
2507
2218 if match is None:
2508 if match is None:
2219 return []
2509 return []
2220
2510
2221 expr, prefix0, prefix = match.groups()
2511 expr, prior_tuple_keys, key_prefix = match.groups()
2222 try:
2512
2223 obj = eval(expr, self.namespace)
2513 obj = self._evaluate_expr(expr)
2224 except Exception:
2514
2225 try:
2515 if obj is not_found:
2226 obj = eval(expr, self.global_namespace)
2227 except Exception:
2228 return []
2516 return []
2229
2517
2230 keys = self._get_keys(obj)
2518 keys = self._get_keys(obj)
2231 if not keys:
2519 if not keys:
2232 return keys
2520 return keys
2233
2521
2234 extra_prefix = eval(prefix0) if prefix0 != '' else None
2522 tuple_prefix = guarded_eval(
2523 prior_tuple_keys,
2524 EvaluationContext(
2525 globals=self.global_namespace,
2526 locals=self.namespace,
2527 evaluation=self.evaluation,
2528 in_subscript=True,
2529 ),
2530 )
2235
2531
2236 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims, extra_prefix=extra_prefix)
2532 closing_quote, token_offset, matches = match_dict_keys(
2533 keys, key_prefix, self.splitter.delims, extra_prefix=tuple_prefix
2534 )
2237 if not matches:
2535 if not matches:
2238 return matches
2536 return []
2239
2537
2240 # get the cursor position of
2538 # get the cursor position of
2241 # - the text being completed
2539 # - the text being completed
2242 # - the start of the key text
2540 # - the start of the key text
2243 # - the start of the completion
2541 # - the start of the completion
2244 text_start = len(self.text_until_cursor) - len(text)
2542 text_start = len(self.text_until_cursor) - len(text)
2245 if prefix:
2543 if key_prefix:
2246 key_start = match.start(3)
2544 key_start = match.start(3)
2247 completion_start = key_start + token_offset
2545 completion_start = key_start + token_offset
2248 else:
2546 else:
@@ -2254,26 +2552,61 b' class IPCompleter(Completer):'
2254 else:
2552 else:
2255 leading = text[text_start:completion_start]
2553 leading = text[text_start:completion_start]
2256
2554
2257 # the index of the `[` character
2258 bracket_idx = match.end(1)
2259
2260 # append closing quote and bracket as appropriate
2555 # append closing quote and bracket as appropriate
2261 # this is *not* appropriate if the opening quote or bracket is outside
2556 # this is *not* appropriate if the opening quote or bracket is outside
2262 # the text given to this method
2557 # the text given to this method, e.g. `d["""a\nt
2263 suf = ''
2558 can_close_quote = False
2264 continuation = self.line_buffer[len(self.text_until_cursor):]
2559 can_close_bracket = False
2265 if key_start > text_start and closing_quote:
2560
2266 # quotes were opened inside text, maybe close them
2561 continuation = self.line_buffer[len(self.text_until_cursor) :].strip()
2562
2267 if continuation.startswith(closing_quote):
2563 if continuation.startswith(closing_quote):
2564 # do not close if already closed, e.g. `d['a<tab>'`
2268 continuation = continuation[len(closing_quote):]
2565 continuation = continuation[len(closing_quote) :]
2269 else:
2566 else:
2270 suf += closing_quote
2567 can_close_quote = True
2271 if bracket_idx > text_start:
2568
2272 # brackets were opened inside text, maybe close them
2569 continuation = continuation.strip()
2273 if not continuation.startswith(']'):
2570
2274 suf += ']'
2571 # e.g. `pandas.DataFrame` has different tuple indexer behaviour,
2572 # handling it is out of scope, so let's avoid appending suffixes.
2573 has_known_tuple_handling = isinstance(obj, dict)
2574
2575 can_close_bracket = (
2576 not continuation.startswith("]") and self.auto_close_dict_keys
2577 )
2578 can_close_tuple_item = (
2579 not continuation.startswith(",")
2580 and has_known_tuple_handling
2581 and self.auto_close_dict_keys
2582 )
2583 can_close_quote = can_close_quote and self.auto_close_dict_keys
2275
2584
2276 return [leading + k + suf for k in matches]
2585 # fast path if closing qoute should be appended but not suffix is allowed
2586 if not can_close_quote and not can_close_bracket and closing_quote:
2587 return [leading + k for k in matches]
2588
2589 results = []
2590
2591 end_of_tuple_or_item = _DictKeyState.END_OF_TUPLE | _DictKeyState.END_OF_ITEM
2592
2593 for k, state_flag in matches.items():
2594 result = leading + k
2595 if can_close_quote and closing_quote:
2596 result += closing_quote
2597
2598 if state_flag == end_of_tuple_or_item:
2599 # We do not know which suffix to add,
2600 # e.g. both tuple item and string
2601 # match this item.
2602 pass
2603
2604 if state_flag in end_of_tuple_or_item and can_close_bracket:
2605 result += "]"
2606 if state_flag == _DictKeyState.IN_TUPLE and can_close_tuple_item:
2607 result += ", "
2608 results.append(result)
2609 return results
2277
2610
2278 @context_matcher()
2611 @context_matcher()
2279 def unicode_name_matcher(self, context: CompletionContext):
2612 def unicode_name_matcher(self, context: CompletionContext):
@@ -2516,17 +2849,23 b' class IPCompleter(Completer):'
2516
2849
2517 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2850 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2518
2851
2852 def is_non_jedi_result(
2853 result: MatcherResult, identifier: str
2854 ) -> TypeGuard[SimpleMatcherResult]:
2855 return identifier != jedi_matcher_id
2856
2519 results = self._complete(
2857 results = self._complete(
2520 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column
2858 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column
2521 )
2859 )
2860
2522 non_jedi_results: Dict[str, SimpleMatcherResult] = {
2861 non_jedi_results: Dict[str, SimpleMatcherResult] = {
2523 identifier: result
2862 identifier: result
2524 for identifier, result in results.items()
2863 for identifier, result in results.items()
2525 if identifier != jedi_matcher_id
2864 if is_non_jedi_result(result, identifier)
2526 }
2865 }
2527
2866
2528 jedi_matches = (
2867 jedi_matches = (
2529 cast(results[jedi_matcher_id], _JediMatcherResult)["completions"]
2868 cast(_JediMatcherResult, results[jedi_matcher_id])["completions"]
2530 if jedi_matcher_id in results
2869 if jedi_matcher_id in results
2531 else ()
2870 else ()
2532 )
2871 )
@@ -2581,8 +2920,8 b' class IPCompleter(Completer):'
2581 signature="",
2920 signature="",
2582 )
2921 )
2583
2922
2584 ordered = []
2923 ordered: List[Completion] = []
2585 sortable = []
2924 sortable: List[Completion] = []
2586
2925
2587 for origin, result in non_jedi_results.items():
2926 for origin, result in non_jedi_results.items():
2588 matched_text = result["matched_fragment"]
2927 matched_text = result["matched_fragment"]
@@ -2672,8 +3011,8 b' class IPCompleter(Completer):'
2672 abort_if_offset_changes: bool,
3011 abort_if_offset_changes: bool,
2673 ):
3012 ):
2674
3013
2675 sortable = []
3014 sortable: List[AnyMatcherCompletion] = []
2676 ordered = []
3015 ordered: List[AnyMatcherCompletion] = []
2677 most_recent_fragment = None
3016 most_recent_fragment = None
2678 for identifier, result in results.items():
3017 for identifier, result in results.items():
2679 if identifier in skip_matchers:
3018 if identifier in skip_matchers:
@@ -2772,11 +3111,11 b' class IPCompleter(Completer):'
2772 )
3111 )
2773
3112
2774 # Start with a clean slate of completions
3113 # Start with a clean slate of completions
2775 results = {}
3114 results: Dict[str, MatcherResult] = {}
2776
3115
2777 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
3116 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2778
3117
2779 suppressed_matchers = set()
3118 suppressed_matchers: Set[str] = set()
2780
3119
2781 matchers = {
3120 matchers = {
2782 _get_matcher_id(matcher): matcher
3121 _get_matcher_id(matcher): matcher
@@ -2786,7 +3125,6 b' class IPCompleter(Completer):'
2786 }
3125 }
2787
3126
2788 for matcher_id, matcher in matchers.items():
3127 for matcher_id, matcher in matchers.items():
2789 api_version = _get_matcher_api_version(matcher)
2790 matcher_id = _get_matcher_id(matcher)
3128 matcher_id = _get_matcher_id(matcher)
2791
3129
2792 if matcher_id in self.disable_matchers:
3130 if matcher_id in self.disable_matchers:
@@ -2798,14 +3136,16 b' class IPCompleter(Completer):'
2798 if matcher_id in suppressed_matchers:
3136 if matcher_id in suppressed_matchers:
2799 continue
3137 continue
2800
3138
3139 result: MatcherResult
2801 try:
3140 try:
2802 if api_version == 1:
3141 if _is_matcher_v1(matcher):
2803 result = _convert_matcher_v1_result_to_v2(
3142 result = _convert_matcher_v1_result_to_v2(
2804 matcher(text), type=_UNKNOWN_TYPE
3143 matcher(text), type=_UNKNOWN_TYPE
2805 )
3144 )
2806 elif api_version == 2:
3145 elif _is_matcher_v2(matcher):
2807 result = cast(matcher, MatcherAPIv2)(context)
3146 result = matcher(context)
2808 else:
3147 else:
3148 api_version = _get_matcher_api_version(matcher)
2809 raise ValueError(f"Unsupported API version {api_version}")
3149 raise ValueError(f"Unsupported API version {api_version}")
2810 except:
3150 except:
2811 # Show the ugly traceback if the matcher causes an
3151 # Show the ugly traceback if the matcher causes an
@@ -2817,7 +3157,9 b' class IPCompleter(Completer):'
2817 result["matched_fragment"] = result.get("matched_fragment", context.token)
3157 result["matched_fragment"] = result.get("matched_fragment", context.token)
2818
3158
2819 if not suppressed_matchers:
3159 if not suppressed_matchers:
2820 suppression_recommended = result.get("suppress", False)
3160 suppression_recommended: Union[bool, Set[str]] = result.get(
3161 "suppress", False
3162 )
2821
3163
2822 suppression_config = (
3164 suppression_config = (
2823 self.suppress_competing_matchers.get(matcher_id, None)
3165 self.suppress_competing_matchers.get(matcher_id, None)
@@ -2830,10 +3172,12 b' class IPCompleter(Completer):'
2830 ) and has_any_completions(result)
3172 ) and has_any_completions(result)
2831
3173
2832 if should_suppress:
3174 if should_suppress:
2833 suppression_exceptions = result.get("do_not_suppress", set())
3175 suppression_exceptions: Set[str] = result.get(
2834 try:
3176 "do_not_suppress", set()
3177 )
3178 if isinstance(suppression_recommended, Iterable):
2835 to_suppress = set(suppression_recommended)
3179 to_suppress = set(suppression_recommended)
2836 except TypeError:
3180 else:
2837 to_suppress = set(matchers)
3181 to_suppress = set(matchers)
2838 suppressed_matchers = to_suppress - suppression_exceptions
3182 suppressed_matchers = to_suppress - suppression_exceptions
2839
3183
@@ -2860,9 +3204,9 b' class IPCompleter(Completer):'
2860
3204
2861 @staticmethod
3205 @staticmethod
2862 def _deduplicate(
3206 def _deduplicate(
2863 matches: Sequence[SimpleCompletion],
3207 matches: Sequence[AnyCompletion],
2864 ) -> Iterable[SimpleCompletion]:
3208 ) -> Iterable[AnyCompletion]:
2865 filtered_matches = {}
3209 filtered_matches: Dict[str, AnyCompletion] = {}
2866 for match in matches:
3210 for match in matches:
2867 text = match.text
3211 text = match.text
2868 if (
3212 if (
@@ -2874,7 +3218,7 b' class IPCompleter(Completer):'
2874 return filtered_matches.values()
3218 return filtered_matches.values()
2875
3219
2876 @staticmethod
3220 @staticmethod
2877 def _sort(matches: Sequence[SimpleCompletion]):
3221 def _sort(matches: Sequence[AnyCompletion]):
2878 return sorted(matches, key=lambda x: completions_sorting_key(x.text))
3222 return sorted(matches, key=lambda x: completions_sorting_key(x.text))
2879
3223
2880 @context_matcher()
3224 @context_matcher()
@@ -389,6 +389,9 b' class InteractiveShell(SingletonConfigurable):'
389 displayhook_class = Type(DisplayHook)
389 displayhook_class = Type(DisplayHook)
390 display_pub_class = Type(DisplayPublisher)
390 display_pub_class = Type(DisplayPublisher)
391 compiler_class = Type(CachingCompiler)
391 compiler_class = Type(CachingCompiler)
392 inspector_class = Type(
393 oinspect.Inspector, help="Class to use to instantiate the shell inspector"
394 ).tag(config=True)
392
395
393 sphinxify_docstring = Bool(False, help=
396 sphinxify_docstring = Bool(False, help=
394 """
397 """
@@ -755,10 +758,12 b' class InteractiveShell(SingletonConfigurable):'
755 @observe('colors')
758 @observe('colors')
756 def init_inspector(self, changes=None):
759 def init_inspector(self, changes=None):
757 # Object inspector
760 # Object inspector
758 self.inspector = oinspect.Inspector(oinspect.InspectColors,
761 self.inspector = self.inspector_class(
762 oinspect.InspectColors,
759 PyColorize.ANSICodeColors,
763 PyColorize.ANSICodeColors,
760 self.colors,
764 self.colors,
761 self.object_info_string_level)
765 self.object_info_string_level,
766 )
762
767
763 def init_io(self):
768 def init_io(self):
764 # implemented in subclasses, TerminalInteractiveShell does call
769 # implemented in subclasses, TerminalInteractiveShell does call
@@ -3138,8 +3143,12 b' class InteractiveShell(SingletonConfigurable):'
3138 else:
3143 else:
3139 cell = raw_cell
3144 cell = raw_cell
3140
3145
3146 # Do NOT store paste/cpaste magic history
3147 if "get_ipython().run_line_magic(" in cell and "paste" in cell:
3148 store_history = False
3149
3141 # Store raw and processed history
3150 # Store raw and processed history
3142 if store_history and raw_cell.strip(" %") != "paste":
3151 if store_history:
3143 self.history_manager.store_inputs(self.execution_count, cell, raw_cell)
3152 self.history_manager.store_inputs(self.execution_count, cell, raw_cell)
3144 if not silent:
3153 if not silent:
3145 self.logger.log(cell, raw_cell)
3154 self.logger.log(cell, raw_cell)
@@ -68,94 +68,22 b' class ConfigMagics(Magics):'
68 To view what is configurable on a given class, just pass the class
68 To view what is configurable on a given class, just pass the class
69 name::
69 name::
70
70
71 In [2]: %config IPCompleter
71 In [2]: %config LoggingMagics
72 IPCompleter(Completer) options
72 LoggingMagics(Magics) options
73 ----------------------------
73 ---------------------------
74 IPCompleter.backslash_combining_completions=<Bool>
74 LoggingMagics.quiet=<Bool>
75 Enable unicode completions, e.g. \\alpha<tab> . Includes completion of latex
75 Suppress output of log state when logging is enabled
76 commands, unicode names, and expanding unicode characters back to latex
77 commands.
78 Current: True
79 IPCompleter.debug=<Bool>
80 Enable debug for the Completer. Mostly print extra information for
81 experimental jedi integration.
82 Current: False
76 Current: False
83 IPCompleter.disable_matchers=<list-item-1>...
84 List of matchers to disable.
85 The list should contain matcher identifiers (see
86 :any:`completion_matcher`).
87 Current: []
88 IPCompleter.greedy=<Bool>
89 Activate greedy completion
90 PENDING DEPRECATION. this is now mostly taken care of with Jedi.
91 This will enable completion on elements of lists, results of function calls, etc.,
92 but can be unsafe because the code is actually evaluated on TAB.
93 Current: False
94 IPCompleter.jedi_compute_type_timeout=<Int>
95 Experimental: restrict time (in milliseconds) during which Jedi can compute types.
96 Set to 0 to stop computing types. Non-zero value lower than 100ms may hurt
97 performance by preventing jedi to build its cache.
98 Current: 400
99 IPCompleter.limit_to__all__=<Bool>
100 DEPRECATED as of version 5.0.
101 Instruct the completer to use __all__ for the completion
102 Specifically, when completing on ``object.<tab>``.
103 When True: only those names in obj.__all__ will be included.
104 When False [default]: the __all__ attribute is ignored
105 Current: False
106 IPCompleter.merge_completions=<Bool>
107 Whether to merge completion results into a single list
108 If False, only the completion results from the first non-empty
109 completer will be returned.
110 As of version 8.6.0, setting the value to ``False`` is an alias for:
111 ``IPCompleter.suppress_competing_matchers = True.``.
112 Current: True
113 IPCompleter.omit__names=<Enum>
114 Instruct the completer to omit private method names
115 Specifically, when completing on ``object.<tab>``.
116 When 2 [default]: all names that start with '_' will be excluded.
117 When 1: all 'magic' names (``__foo__``) will be excluded.
118 When 0: nothing will be excluded.
119 Choices: any of [0, 1, 2]
120 Current: 2
121 IPCompleter.profile_completions=<Bool>
122 If True, emit profiling data for completion subsystem using cProfile.
123 Current: False
124 IPCompleter.profiler_output_dir=<Unicode>
125 Template for path at which to output profile data for completions.
126 Current: '.completion_profiles'
127 IPCompleter.suppress_competing_matchers=<Union>
128 Whether to suppress completions from other *Matchers*.
129 When set to ``None`` (default) the matchers will attempt to auto-detect
130 whether suppression of other matchers is desirable. For example, at the
131 beginning of a line followed by `%` we expect a magic completion to be the
132 only applicable option, and after ``my_dict['`` we usually expect a
133 completion with an existing dictionary key.
134 If you want to disable this heuristic and see completions from all matchers,
135 set ``IPCompleter.suppress_competing_matchers = False``. To disable the
136 heuristic for specific matchers provide a dictionary mapping:
137 ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher':
138 False}``.
139 Set ``IPCompleter.suppress_competing_matchers = True`` to limit completions
140 to the set of matchers with the highest priority; this is equivalent to
141 ``IPCompleter.merge_completions`` and can be beneficial for performance, but
142 will sometimes omit relevant candidates from matchers further down the
143 priority list.
144 Current: None
145 IPCompleter.use_jedi=<Bool>
146 Experimental: Use Jedi to generate autocompletions. Default to True if jedi
147 is installed.
148 Current: True
149
77
150 but the real use is in setting values::
78 but the real use is in setting values::
151
79
152 In [3]: %config IPCompleter.greedy = True
80 In [3]: %config LoggingMagics.quiet = True
153
81
154 and these values are read from the user_ns if they are variables::
82 and these values are read from the user_ns if they are variables::
155
83
156 In [4]: feeling_greedy=False
84 In [4]: feeling_quiet=False
157
85
158 In [5]: %config IPCompleter.greedy = feeling_greedy
86 In [5]: %config LoggingMagics.quiet = feeling_quiet
159
87
160 """
88 """
161 from traitlets.config.loader import Config
89 from traitlets.config.loader import Config
@@ -16,6 +16,7 b" __all__ = ['Inspector','InspectColors']"
16 import ast
16 import ast
17 import inspect
17 import inspect
18 from inspect import signature
18 from inspect import signature
19 import html
19 import linecache
20 import linecache
20 import warnings
21 import warnings
21 import os
22 import os
@@ -530,8 +531,8 b' class Inspector(Colorable):'
530
531
531 """
532 """
532 defaults = {
533 defaults = {
533 'text/plain': text,
534 "text/plain": text,
534 'text/html': '<pre>' + text + '</pre>'
535 "text/html": f"<pre>{html.escape(text)}</pre>",
535 }
536 }
536
537
537 if formatter is None:
538 if formatter is None:
@@ -542,66 +543,66 b' class Inspector(Colorable):'
542 if not isinstance(formatted, dict):
543 if not isinstance(formatted, dict):
543 # Handle the deprecated behavior of a formatter returning
544 # Handle the deprecated behavior of a formatter returning
544 # a string instead of a mime bundle.
545 # a string instead of a mime bundle.
545 return {
546 return {"text/plain": formatted, "text/html": f"<pre>{formatted}</pre>"}
546 'text/plain': formatted,
547 'text/html': '<pre>' + formatted + '</pre>'
548 }
549
547
550 else:
548 else:
551 return dict(defaults, **formatted)
549 return dict(defaults, **formatted)
552
550
553
551
554 def format_mime(self, bundle):
552 def format_mime(self, bundle):
553 """Format a mimebundle being created by _make_info_unformatted into a real mimebundle"""
554 # Format text/plain mimetype
555 if isinstance(bundle["text/plain"], (list, tuple)):
556 # bundle['text/plain'] is a list of (head, formatted body) pairs
557 lines = []
558 _len = max(len(h) for h, _ in bundle["text/plain"])
555
559
556 text_plain = bundle['text/plain']
560 for head, body in bundle["text/plain"]:
557
561 body = body.strip("\n")
558 text = ''
562 delim = "\n" if "\n" in body else " "
559 heads, bodies = list(zip(*text_plain))
563 lines.append(
560 _len = max(len(h) for h in heads)
564 f"{self.__head(head+':')}{(_len - len(head))*' '}{delim}{body}"
565 )
561
566
562 for head, body in zip(heads, bodies):
567 bundle["text/plain"] = "\n".join(lines)
563 body = body.strip('\n')
564 delim = '\n' if '\n' in body else ' '
565 text += self.__head(head+':') + (_len - len(head))*' ' +delim + body +'\n'
566
568
567 bundle['text/plain'] = text
569 # Format the text/html mimetype
570 if isinstance(bundle["text/html"], (list, tuple)):
571 # bundle['text/html'] is a list of (head, formatted body) pairs
572 bundle["text/html"] = "\n".join(
573 (f"<h1>{head}</h1>\n{body}" for (head, body) in bundle["text/html"])
574 )
568 return bundle
575 return bundle
569
576
570 def _get_info(
577 def _append_info_field(
571 self, obj, oname="", formatter=None, info=None, detail_level=0, omit_sections=()
578 self, bundle, title: str, key: str, info, omit_sections, formatter
572 ):
579 ):
573 """Retrieve an info dict and format it.
580 """Append an info value to the unformatted mimebundle being constructed by _make_info_unformatted"""
574
575 Parameters
576 ----------
577 obj : any
578 Object to inspect and return info from
579 oname : str (default: ''):
580 Name of the variable pointing to `obj`.
581 formatter : callable
582 info
583 already computed information
584 detail_level : integer
585 Granularity of detail level, if set to 1, give more information.
586 omit_sections : container[str]
587 Titles or keys to omit from output (can be set, tuple, etc., anything supporting `in`)
588 """
589
590 info = self.info(obj, oname=oname, info=info, detail_level=detail_level)
591
592 _mime = {
593 'text/plain': [],
594 'text/html': '',
595 }
596
597 def append_field(bundle, title:str, key:str, formatter=None):
598 if title in omit_sections or key in omit_sections:
581 if title in omit_sections or key in omit_sections:
599 return
582 return
600 field = info[key]
583 field = info[key]
601 if field is not None:
584 if field is not None:
602 formatted_field = self._mime_format(field, formatter)
585 formatted_field = self._mime_format(field, formatter)
603 bundle['text/plain'].append((title, formatted_field['text/plain']))
586 bundle["text/plain"].append((title, formatted_field["text/plain"]))
604 bundle['text/html'] += '<h1>' + title + '</h1>\n' + formatted_field['text/html'] + '\n'
587 bundle["text/html"].append((title, formatted_field["text/html"]))
588
589 def _make_info_unformatted(self, obj, info, formatter, detail_level, omit_sections):
590 """Assemble the mimebundle as unformatted lists of information"""
591 bundle = {
592 "text/plain": [],
593 "text/html": [],
594 }
595
596 # A convenience function to simplify calls below
597 def append_field(bundle, title: str, key: str, formatter=None):
598 self._append_info_field(
599 bundle,
600 title=title,
601 key=key,
602 info=info,
603 omit_sections=omit_sections,
604 formatter=formatter,
605 )
605
606
606 def code_formatter(text):
607 def code_formatter(text):
607 return {
608 return {
@@ -609,57 +610,82 b' class Inspector(Colorable):'
609 'text/html': pylight(text)
610 'text/html': pylight(text)
610 }
611 }
611
612
612 if info['isalias']:
613 if info["isalias"]:
613 append_field(_mime, 'Repr', 'string_form')
614 append_field(bundle, "Repr", "string_form")
614
615
615 elif info['ismagic']:
616 elif info['ismagic']:
616 if detail_level > 0:
617 if detail_level > 0:
617 append_field(_mime, 'Source', 'source', code_formatter)
618 append_field(bundle, "Source", "source", code_formatter)
618 else:
619 else:
619 append_field(_mime, 'Docstring', 'docstring', formatter)
620 append_field(bundle, "Docstring", "docstring", formatter)
620 append_field(_mime, 'File', 'file')
621 append_field(bundle, "File", "file")
621
622
622 elif info['isclass'] or is_simple_callable(obj):
623 elif info['isclass'] or is_simple_callable(obj):
623 # Functions, methods, classes
624 # Functions, methods, classes
624 append_field(_mime, 'Signature', 'definition', code_formatter)
625 append_field(bundle, "Signature", "definition", code_formatter)
625 append_field(_mime, 'Init signature', 'init_definition', code_formatter)
626 append_field(bundle, "Init signature", "init_definition", code_formatter)
626 append_field(_mime, 'Docstring', 'docstring', formatter)
627 append_field(bundle, "Docstring", "docstring", formatter)
627 if detail_level > 0 and info['source']:
628 if detail_level > 0 and info["source"]:
628 append_field(_mime, 'Source', 'source', code_formatter)
629 append_field(bundle, "Source", "source", code_formatter)
629 else:
630 else:
630 append_field(_mime, 'Init docstring', 'init_docstring', formatter)
631 append_field(bundle, "Init docstring", "init_docstring", formatter)
631
632
632 append_field(_mime, 'File', 'file')
633 append_field(bundle, "File", "file")
633 append_field(_mime, 'Type', 'type_name')
634 append_field(bundle, "Type", "type_name")
634 append_field(_mime, 'Subclasses', 'subclasses')
635 append_field(bundle, "Subclasses", "subclasses")
635
636
636 else:
637 else:
637 # General Python objects
638 # General Python objects
638 append_field(_mime, 'Signature', 'definition', code_formatter)
639 append_field(bundle, "Signature", "definition", code_formatter)
639 append_field(_mime, 'Call signature', 'call_def', code_formatter)
640 append_field(bundle, "Call signature", "call_def", code_formatter)
640 append_field(_mime, 'Type', 'type_name')
641 append_field(bundle, "Type", "type_name")
641 append_field(_mime, 'String form', 'string_form')
642 append_field(bundle, "String form", "string_form")
642
643
643 # Namespace
644 # Namespace
644 if info['namespace'] != 'Interactive':
645 if info["namespace"] != "Interactive":
645 append_field(_mime, 'Namespace', 'namespace')
646 append_field(bundle, "Namespace", "namespace")
646
647
647 append_field(_mime, 'Length', 'length')
648 append_field(bundle, "Length", "length")
648 append_field(_mime, 'File', 'file')
649 append_field(bundle, "File", "file")
649
650
650 # Source or docstring, depending on detail level and whether
651 # Source or docstring, depending on detail level and whether
651 # source found.
652 # source found.
652 if detail_level > 0 and info['source']:
653 if detail_level > 0 and info["source"]:
653 append_field(_mime, 'Source', 'source', code_formatter)
654 append_field(bundle, "Source", "source", code_formatter)
654 else:
655 else:
655 append_field(_mime, 'Docstring', 'docstring', formatter)
656 append_field(bundle, "Docstring", "docstring", formatter)
657
658 append_field(bundle, "Class docstring", "class_docstring", formatter)
659 append_field(bundle, "Init docstring", "init_docstring", formatter)
660 append_field(bundle, "Call docstring", "call_docstring", formatter)
661 return bundle
656
662
657 append_field(_mime, 'Class docstring', 'class_docstring', formatter)
658 append_field(_mime, 'Init docstring', 'init_docstring', formatter)
659 append_field(_mime, 'Call docstring', 'call_docstring', formatter)
660
663
664 def _get_info(
665 self, obj, oname="", formatter=None, info=None, detail_level=0, omit_sections=()
666 ):
667 """Retrieve an info dict and format it.
668
669 Parameters
670 ----------
671 obj : any
672 Object to inspect and return info from
673 oname : str (default: ''):
674 Name of the variable pointing to `obj`.
675 formatter : callable
676 info
677 already computed information
678 detail_level : integer
679 Granularity of detail level, if set to 1, give more information.
680 omit_sections : container[str]
681 Titles or keys to omit from output (can be set, tuple, etc., anything supporting `in`)
682 """
661
683
662 return self.format_mime(_mime)
684 info = self.info(obj, oname=oname, info=info, detail_level=detail_level)
685 bundle = self._make_info_unformatted(
686 obj, info, formatter, detail_level=detail_level, omit_sections=omit_sections
687 )
688 return self.format_mime(bundle)
663
689
664 def pinfo(
690 def pinfo(
665 self,
691 self,
@@ -24,6 +24,7 b' from IPython.core.completer import ('
24 provisionalcompleter,
24 provisionalcompleter,
25 match_dict_keys,
25 match_dict_keys,
26 _deduplicate_completions,
26 _deduplicate_completions,
27 _match_number_in_dict_key_prefix,
27 completion_matcher,
28 completion_matcher,
28 SimpleCompletion,
29 SimpleCompletion,
29 CompletionContext,
30 CompletionContext,
@@ -113,6 +114,17 b' def greedy_completion():'
113
114
114
115
115 @contextmanager
116 @contextmanager
117 def evaluation_policy(evaluation: str):
118 ip = get_ipython()
119 evaluation_original = ip.Completer.evaluation
120 try:
121 ip.Completer.evaluation = evaluation
122 yield
123 finally:
124 ip.Completer.evaluation = evaluation_original
125
126
127 @contextmanager
116 def custom_matchers(matchers):
128 def custom_matchers(matchers):
117 ip = get_ipython()
129 ip = get_ipython()
118 try:
130 try:
@@ -170,7 +182,6 b' def check_line_split(splitter, test_specs):'
170 out = splitter.split_line(line, cursor_pos)
182 out = splitter.split_line(line, cursor_pos)
171 assert out == split
183 assert out == split
172
184
173
174 def test_line_split():
185 def test_line_split():
175 """Basic line splitter test with default specs."""
186 """Basic line splitter test with default specs."""
176 sp = completer.CompletionSplitter()
187 sp = completer.CompletionSplitter()
@@ -841,18 +852,45 b' class TestCompleter(unittest.TestCase):'
841 """
852 """
842 delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
853 delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
843
854
844 keys = ["foo", b"far"]
855 def match(*args, **kwargs):
845 assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2, ["far"])
856 quote, offset, matches = match_dict_keys(*args, delims=delims, **kwargs)
846 assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2, ["far"])
857 return quote, offset, list(matches)
847 assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2, ["far"])
848 assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2, ["far"])
849
850 assert match_dict_keys(keys, "'", delims=delims) == ("'", 1, ["foo"])
851 assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1, ["foo"])
852 assert match_dict_keys(keys, '"', delims=delims) == ('"', 1, ["foo"])
853 assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1, ["foo"])
854
858
855 match_dict_keys
859 keys = ["foo", b"far"]
860 assert match(keys, "b'") == ("'", 2, ["far"])
861 assert match(keys, "b'f") == ("'", 2, ["far"])
862 assert match(keys, 'b"') == ('"', 2, ["far"])
863 assert match(keys, 'b"f') == ('"', 2, ["far"])
864
865 assert match(keys, "'") == ("'", 1, ["foo"])
866 assert match(keys, "'f") == ("'", 1, ["foo"])
867 assert match(keys, '"') == ('"', 1, ["foo"])
868 assert match(keys, '"f') == ('"', 1, ["foo"])
869
870 # Completion on first item of tuple
871 keys = [("foo", 1111), ("foo", 2222), (3333, "bar"), (3333, "test")]
872 assert match(keys, "'f") == ("'", 1, ["foo"])
873 assert match(keys, "33") == ("", 0, ["3333"])
874
875 # Completion on numbers
876 keys = [
877 0xDEADBEEF,
878 1111,
879 1234,
880 "1999",
881 0b10101,
882 22,
883 ] # 0xDEADBEEF = 3735928559; 0b10101 = 21
884 assert match(keys, "0xdead") == ("", 0, ["0xdeadbeef"])
885 assert match(keys, "1") == ("", 0, ["1111", "1234"])
886 assert match(keys, "2") == ("", 0, ["21", "22"])
887 assert match(keys, "0b101") == ("", 0, ["0b10101", "0b10110"])
888
889 # Should yield on variables
890 assert match(keys, "a_variable") == ("", 0, [])
891
892 # Should pass over invalid literals
893 assert match(keys, "'' ''") == ("", 0, [])
856
894
857 def test_match_dict_keys_tuple(self):
895 def test_match_dict_keys_tuple(self):
858 """
896 """
@@ -863,25 +901,91 b' class TestCompleter(unittest.TestCase):'
863
901
864 keys = [("foo", "bar"), ("foo", "oof"), ("foo", b"bar"), ('other', 'test')]
902 keys = [("foo", "bar"), ("foo", "oof"), ("foo", b"bar"), ('other', 'test')]
865
903
904 def match(*args, extra=None, **kwargs):
905 quote, offset, matches = match_dict_keys(
906 *args, delims=delims, extra_prefix=extra, **kwargs
907 )
908 return quote, offset, list(matches)
909
866 # Completion on first key == "foo"
910 # Completion on first key == "foo"
867 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["bar", "oof"])
911 assert match(keys, "'", extra=("foo",)) == ("'", 1, ["bar", "oof"])
868 assert match_dict_keys(keys, "\"", delims=delims, extra_prefix=("foo",)) == ("\"", 1, ["bar", "oof"])
912 assert match(keys, '"', extra=("foo",)) == ('"', 1, ["bar", "oof"])
869 assert match_dict_keys(keys, "'o", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["oof"])
913 assert match(keys, "'o", extra=("foo",)) == ("'", 1, ["oof"])
870 assert match_dict_keys(keys, "\"o", delims=delims, extra_prefix=("foo",)) == ("\"", 1, ["oof"])
914 assert match(keys, '"o', extra=("foo",)) == ('"', 1, ["oof"])
871 assert match_dict_keys(keys, "b'", delims=delims, extra_prefix=("foo",)) == ("'", 2, ["bar"])
915 assert match(keys, "b'", extra=("foo",)) == ("'", 2, ["bar"])
872 assert match_dict_keys(keys, "b\"", delims=delims, extra_prefix=("foo",)) == ("\"", 2, ["bar"])
916 assert match(keys, 'b"', extra=("foo",)) == ('"', 2, ["bar"])
873 assert match_dict_keys(keys, "b'b", delims=delims, extra_prefix=("foo",)) == ("'", 2, ["bar"])
917 assert match(keys, "b'b", extra=("foo",)) == ("'", 2, ["bar"])
874 assert match_dict_keys(keys, "b\"b", delims=delims, extra_prefix=("foo",)) == ("\"", 2, ["bar"])
918 assert match(keys, 'b"b', extra=("foo",)) == ('"', 2, ["bar"])
875
919
876 # No Completion
920 # No Completion
877 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("no_foo",)) == ("'", 1, [])
921 assert match(keys, "'", extra=("no_foo",)) == ("'", 1, [])
878 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("fo",)) == ("'", 1, [])
922 assert match(keys, "'", extra=("fo",)) == ("'", 1, [])
923
924 keys = [("foo1", "foo2", "foo3", "foo4"), ("foo1", "foo2", "bar", "foo4")]
925 assert match(keys, "'foo", extra=("foo1",)) == ("'", 1, ["foo2"])
926 assert match(keys, "'foo", extra=("foo1", "foo2")) == ("'", 1, ["foo3"])
927 assert match(keys, "'foo", extra=("foo1", "foo2", "foo3")) == ("'", 1, ["foo4"])
928 assert match(keys, "'foo", extra=("foo1", "foo2", "foo3", "foo4")) == (
929 "'",
930 1,
931 [],
932 )
933
934 keys = [("foo", 1111), ("foo", "2222"), (3333, "bar"), (3333, 4444)]
935 assert match(keys, "'", extra=("foo",)) == ("'", 1, ["2222"])
936 assert match(keys, "", extra=("foo",)) == ("", 0, ["1111", "'2222'"])
937 assert match(keys, "'", extra=(3333,)) == ("'", 1, ["bar"])
938 assert match(keys, "", extra=(3333,)) == ("", 0, ["'bar'", "4444"])
939 assert match(keys, "'", extra=("3333",)) == ("'", 1, [])
940 assert match(keys, "33") == ("", 0, ["3333"])
941
942 def test_dict_key_completion_closures(self):
943 ip = get_ipython()
944 complete = ip.Completer.complete
945 ip.Completer.auto_close_dict_keys = True
946
947 ip.user_ns["d"] = {
948 # tuple only
949 ("aa", 11): None,
950 # tuple and non-tuple
951 ("bb", 22): None,
952 "bb": None,
953 # non-tuple only
954 "cc": None,
955 # numeric tuple only
956 (77, "x"): None,
957 # numeric tuple and non-tuple
958 (88, "y"): None,
959 88: None,
960 # numeric non-tuple only
961 99: None,
962 }
879
963
880 keys = [('foo1', 'foo2', 'foo3', 'foo4'), ('foo1', 'foo2', 'bar', 'foo4')]
964 _, matches = complete(line_buffer="d[")
881 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1',)) == ("'", 1, ["foo2", "foo2"])
965 # should append `, ` if matches a tuple only
882 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2')) == ("'", 1, ["foo3"])
966 self.assertIn("'aa', ", matches)
883 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3')) == ("'", 1, ["foo4"])
967 # should not append anything if matches a tuple and an item
884 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3', 'foo4')) == ("'", 1, [])
968 self.assertIn("'bb'", matches)
969 # should append `]` if matches and item only
970 self.assertIn("'cc']", matches)
971
972 # should append `, ` if matches a tuple only
973 self.assertIn("77, ", matches)
974 # should not append anything if matches a tuple and an item
975 self.assertIn("88", matches)
976 # should append `]` if matches and item only
977 self.assertIn("99]", matches)
978
979 _, matches = complete(line_buffer="d['aa', ")
980 # should restrict matches to those matching tuple prefix
981 self.assertIn("11]", matches)
982 self.assertNotIn("'bb'", matches)
983 self.assertNotIn("'bb', ", matches)
984 self.assertNotIn("'bb']", matches)
985 self.assertNotIn("'cc'", matches)
986 self.assertNotIn("'cc', ", matches)
987 self.assertNotIn("'cc']", matches)
988 ip.Completer.auto_close_dict_keys = False
885
989
886 def test_dict_key_completion_string(self):
990 def test_dict_key_completion_string(self):
887 """Test dictionary key completion for string keys"""
991 """Test dictionary key completion for string keys"""
@@ -1038,6 +1142,35 b' class TestCompleter(unittest.TestCase):'
1038 self.assertNotIn("foo", matches)
1142 self.assertNotIn("foo", matches)
1039 self.assertNotIn("bar", matches)
1143 self.assertNotIn("bar", matches)
1040
1144
1145 def test_dict_key_completion_numbers(self):
1146 ip = get_ipython()
1147 complete = ip.Completer.complete
1148
1149 ip.user_ns["d"] = {
1150 0xDEADBEEF: None, # 3735928559
1151 1111: None,
1152 1234: None,
1153 "1999": None,
1154 0b10101: None, # 21
1155 22: None,
1156 }
1157 _, matches = complete(line_buffer="d[1")
1158 self.assertIn("1111", matches)
1159 self.assertIn("1234", matches)
1160 self.assertNotIn("1999", matches)
1161 self.assertNotIn("'1999'", matches)
1162
1163 _, matches = complete(line_buffer="d[0xdead")
1164 self.assertIn("0xdeadbeef", matches)
1165
1166 _, matches = complete(line_buffer="d[2")
1167 self.assertIn("21", matches)
1168 self.assertIn("22", matches)
1169
1170 _, matches = complete(line_buffer="d[0b101")
1171 self.assertIn("0b10101", matches)
1172 self.assertIn("0b10110", matches)
1173
1041 def test_dict_key_completion_contexts(self):
1174 def test_dict_key_completion_contexts(self):
1042 """Test expression contexts in which dict key completion occurs"""
1175 """Test expression contexts in which dict key completion occurs"""
1043 ip = get_ipython()
1176 ip = get_ipython()
@@ -1050,6 +1183,7 b' class TestCompleter(unittest.TestCase):'
1050
1183
1051 ip.user_ns["C"] = C
1184 ip.user_ns["C"] = C
1052 ip.user_ns["get"] = lambda: d
1185 ip.user_ns["get"] = lambda: d
1186 ip.user_ns["nested"] = {"x": d}
1053
1187
1054 def assert_no_completion(**kwargs):
1188 def assert_no_completion(**kwargs):
1055 _, matches = complete(**kwargs)
1189 _, matches = complete(**kwargs)
@@ -1075,6 +1209,13 b' class TestCompleter(unittest.TestCase):'
1075 assert_completion(line_buffer="(d[")
1209 assert_completion(line_buffer="(d[")
1076 assert_completion(line_buffer="C.data[")
1210 assert_completion(line_buffer="C.data[")
1077
1211
1212 # nested dict completion
1213 assert_completion(line_buffer="nested['x'][")
1214
1215 with evaluation_policy("minimal"):
1216 with pytest.raises(AssertionError):
1217 assert_completion(line_buffer="nested['x'][")
1218
1078 # greedy flag
1219 # greedy flag
1079 def assert_completion(**kwargs):
1220 def assert_completion(**kwargs):
1080 _, matches = complete(**kwargs)
1221 _, matches = complete(**kwargs)
@@ -1162,12 +1303,22 b' class TestCompleter(unittest.TestCase):'
1162 _, matches = complete(line_buffer="d['")
1303 _, matches = complete(line_buffer="d['")
1163 self.assertIn("my_head", matches)
1304 self.assertIn("my_head", matches)
1164 self.assertIn("my_data", matches)
1305 self.assertIn("my_data", matches)
1165 # complete on a nested level
1306
1166 with greedy_completion():
1307 def completes_on_nested():
1167 ip.user_ns["d"] = numpy.zeros(2, dtype=dt)
1308 ip.user_ns["d"] = numpy.zeros(2, dtype=dt)
1168 _, matches = complete(line_buffer="d[1]['my_head']['")
1309 _, matches = complete(line_buffer="d[1]['my_head']['")
1169 self.assertTrue(any(["my_dt" in m for m in matches]))
1310 self.assertTrue(any(["my_dt" in m for m in matches]))
1170 self.assertTrue(any(["my_df" in m for m in matches]))
1311 self.assertTrue(any(["my_df" in m for m in matches]))
1312 # complete on a nested level
1313 with greedy_completion():
1314 completes_on_nested()
1315
1316 with evaluation_policy("limited"):
1317 completes_on_nested()
1318
1319 with evaluation_policy("minimal"):
1320 with pytest.raises(AssertionError):
1321 completes_on_nested()
1171
1322
1172 @dec.skip_without("pandas")
1323 @dec.skip_without("pandas")
1173 def test_dataframe_key_completion(self):
1324 def test_dataframe_key_completion(self):
@@ -1180,6 +1331,17 b' class TestCompleter(unittest.TestCase):'
1180 _, matches = complete(line_buffer="d['")
1331 _, matches = complete(line_buffer="d['")
1181 self.assertIn("hello", matches)
1332 self.assertIn("hello", matches)
1182 self.assertIn("world", matches)
1333 self.assertIn("world", matches)
1334 _, matches = complete(line_buffer="d.loc[:, '")
1335 self.assertIn("hello", matches)
1336 self.assertIn("world", matches)
1337 _, matches = complete(line_buffer="d.loc[1:, '")
1338 self.assertIn("hello", matches)
1339 _, matches = complete(line_buffer="d.loc[1:1, '")
1340 self.assertIn("hello", matches)
1341 _, matches = complete(line_buffer="d.loc[1:1:-1, '")
1342 self.assertIn("hello", matches)
1343 _, matches = complete(line_buffer="d.loc[::, '")
1344 self.assertIn("hello", matches)
1183
1345
1184 def test_dict_key_completion_invalids(self):
1346 def test_dict_key_completion_invalids(self):
1185 """Smoke test cases dict key completion can't handle"""
1347 """Smoke test cases dict key completion can't handle"""
@@ -1503,3 +1665,38 b' class TestCompleter(unittest.TestCase):'
1503 _(["completion_b"])
1665 _(["completion_b"])
1504 a_matcher.matcher_priority = 3
1666 a_matcher.matcher_priority = 3
1505 _(["completion_a"])
1667 _(["completion_a"])
1668
1669
1670 @pytest.mark.parametrize(
1671 "input, expected",
1672 [
1673 ["1.234", "1.234"],
1674 # should match signed numbers
1675 ["+1", "+1"],
1676 ["-1", "-1"],
1677 ["-1.0", "-1.0"],
1678 ["-1.", "-1."],
1679 ["+1.", "+1."],
1680 [".1", ".1"],
1681 # should not match non-numbers
1682 ["1..", None],
1683 ["..", None],
1684 [".1.", None],
1685 # should match after comma
1686 [",1", "1"],
1687 [", 1", "1"],
1688 [", .1", ".1"],
1689 [", +.1", "+.1"],
1690 # should not match after trailing spaces
1691 [".1 ", None],
1692 # some complex cases
1693 ["0b_0011_1111_0100_1110", "0b_0011_1111_0100_1110"],
1694 ["0xdeadbeef", "0xdeadbeef"],
1695 ["0b_1110_0101", "0b_1110_0101"],
1696 # should not match if in an operation
1697 ["1 + 1", None],
1698 [", 1 + 1", None],
1699 ],
1700 )
1701 def test_match_numeric_literal_for_dict_key(input, expected):
1702 assert _match_number_in_dict_key_prefix(input) == expected
@@ -147,7 +147,7 b' class TerminalMagics(Magics):'
147
147
148 sentinel = opts.get('s', u'--')
148 sentinel = opts.get('s', u'--')
149 block = '\n'.join(get_pasted_lines(sentinel, quiet=quiet))
149 block = '\n'.join(get_pasted_lines(sentinel, quiet=quiet))
150 self.store_or_execute(block, name, store_history=False)
150 self.store_or_execute(block, name, store_history=True)
151
151
152 @line_magic
152 @line_magic
153 def paste(self, parameter_s=''):
153 def paste(self, parameter_s=''):
@@ -100,17 +100,12 b' exclude ='
100 setupext
100 setupext
101
101
102 [options.package_data]
102 [options.package_data]
103 IPython = py.typed
103 IPython.core = profile/README*
104 IPython.core = profile/README*
104 IPython.core.tests = *.png, *.jpg, daft_extension/*.py
105 IPython.core.tests = *.png, *.jpg, daft_extension/*.py
105 IPython.lib.tests = *.wav
106 IPython.lib.tests = *.wav
106 IPython.testing.plugin = *.txt
107 IPython.testing.plugin = *.txt
107
108
108 [options.entry_points]
109 pygments.lexers =
110 ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer
111 ipython = IPython.lib.lexers:IPythonLexer
112 ipython3 = IPython.lib.lexers:IPython3Lexer
113
114 [velin]
109 [velin]
115 ignore_patterns =
110 ignore_patterns =
116 IPython/core/tests
111 IPython/core/tests
@@ -139,7 +139,15 b" setup_args['cmdclass'] = {"
139 'install_scripts_sym': install_scripts_for_symlink,
139 'install_scripts_sym': install_scripts_for_symlink,
140 'unsymlink': unsymlink,
140 'unsymlink': unsymlink,
141 }
141 }
142 setup_args["entry_points"] = {"console_scripts": find_entry_points()}
142
143 setup_args["entry_points"] = {
144 "console_scripts": find_entry_points(),
145 "pygments.lexers": [
146 "ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer",
147 "ipython = IPython.lib.lexers:IPythonLexer",
148 "ipython3 = IPython.lib.lexers:IPython3Lexer",
149 ],
150 }
143
151
144 #---------------------------------------------------------------------------
152 #---------------------------------------------------------------------------
145 # Do the actual setup now
153 # Do the actual setup now
General Comments 0
You need to be logged in to leave comments. Login now