##// END OF EJS Templates
Merge branch 'main' into main
Nelson Ferreira -
r28039:6fafd0a1 merge
parent child Browse files
Show More
This diff has been collapsed as it changes many lines, (738 lines changed) Show them Hide them
@@ -0,0 +1,738 b''
1 from typing import (
2 Any,
3 Callable,
4 Dict,
5 Set,
6 Sequence,
7 Tuple,
8 NamedTuple,
9 Type,
10 Literal,
11 Union,
12 TYPE_CHECKING,
13 )
14 import ast
15 import builtins
16 import collections
17 import operator
18 import sys
19 from functools import cached_property
20 from dataclasses import dataclass, field
21
22 from IPython.utils.docs import GENERATING_DOCUMENTATION
23 from IPython.utils.decorators import undoc
24
25
26 if TYPE_CHECKING or GENERATING_DOCUMENTATION:
27 from typing_extensions import Protocol
28 else:
29 # do not require on runtime
30 Protocol = object # requires Python >=3.8
31
32
@undoc
class HasGetItem(Protocol):
    """Protocol for objects that support ``obj[key]`` item access."""

    def __getitem__(self, key) -> None:
        ...
37
38
@undoc
class InstancesHaveGetItem(Protocol):
    """Protocol for callables whose result supports ``[]`` item access."""

    def __call__(self, *args, **kwargs) -> HasGetItem:
        ...
43
44
@undoc
class HasGetAttr(Protocol):
    """Protocol for objects that define a custom ``__getattr__``."""

    def __getattr__(self, key) -> None:
        ...
49
50
@undoc
class DoesNotHaveGetAttr(Protocol):
    """Protocol for objects without a custom ``__getattr__``."""

    pass
54
55
# By default `__getattr__` is not explicitly implemented on most objects,
# so both cases have to be accepted.
MayHaveGetattr = Union[HasGetAttr, DoesNotHaveGetAttr]
58
59
60 def _unbind_method(func: Callable) -> Union[Callable, None]:
61 """Get unbound method for given bound method.
62
63 Returns None if cannot get unbound method, or method is already unbound.
64 """
65 owner = getattr(func, "__self__", None)
66 owner_class = type(owner)
67 name = getattr(func, "__name__", None)
68 instance_dict_overrides = getattr(owner, "__dict__", None)
69 if (
70 owner is not None
71 and name
72 and (
73 not instance_dict_overrides
74 or (instance_dict_overrides and name not in instance_dict_overrides)
75 )
76 ):
77 return getattr(owner_class, name)
78 return None
79
80
@undoc
@dataclass
class EvaluationPolicy:
    """Definition of evaluation policy.

    Each flag gates one category of operations; the ``can_*`` predicates
    return True when allowed and a falsy value otherwise.
    """

    allow_locals_access: bool = False
    allow_globals_access: bool = False
    allow_item_access: bool = False
    allow_attr_access: bool = False
    allow_builtins_access: bool = False
    allow_all_operations: bool = False
    allow_any_calls: bool = False
    allowed_calls: Set[Callable] = field(default_factory=set)

    def can_get_item(self, value, item):
        """Item access is all-or-nothing under the base policy."""
        return self.allow_item_access

    def can_get_attr(self, value, attr):
        """Attribute access is all-or-nothing under the base policy."""
        return self.allow_attr_access

    def can_operate(self, dunders: Tuple[str, ...], a, b=None):
        """Return True when any operation is allowed (None otherwise)."""
        if self.allow_all_operations:
            return True

    def can_call(self, func):
        """Return True when calling ``func`` is allowed (None otherwise)."""
        if self.allow_any_calls or func in self.allowed_calls:
            return True
        # a bound method is accepted when its class-level function
        # is present in the allow-list
        unbound = _unbind_method(func)
        if unbound is not None and unbound in self.allowed_calls:
            return True
116
117
118 def _get_external(module_name: str, access_path: Sequence[str]):
119 """Get value from external module given a dotted access path.
120
121 Raises:
122 * `KeyError` if module is removed not found, and
123 * `AttributeError` if acess path does not match an exported object
124 """
125 member_type = sys.modules[module_name]
126 for attr in access_path:
127 member_type = getattr(member_type, attr)
128 return member_type
129
130
131 def _has_original_dunder_external(
132 value,
133 module_name: str,
134 access_path: Sequence[str],
135 method_name: str,
136 ):
137 if module_name not in sys.modules:
138 # LBYLB as it is faster
139 return False
140 try:
141 member_type = _get_external(module_name, access_path)
142 value_type = type(value)
143 if type(value) == member_type:
144 return True
145 if method_name == "__getattribute__":
146 # we have to short-circuit here due to an unresolved issue in
147 # `isinstance` implementation: https://bugs.python.org/issue32683
148 return False
149 if isinstance(value, member_type):
150 method = getattr(value_type, method_name, None)
151 member_method = getattr(member_type, method_name, None)
152 if member_method == method:
153 return True
154 except (AttributeError, KeyError):
155 return False
156
157
158 def _has_original_dunder(
159 value, allowed_types, allowed_methods, allowed_external, method_name
160 ):
161 # note: Python ignores `__getattr__`/`__getitem__` on instances,
162 # we only need to check at class level
163 value_type = type(value)
164
165 # strict type check passes β†’ no need to check method
166 if value_type in allowed_types:
167 return True
168
169 method = getattr(value_type, method_name, None)
170
171 if method is None:
172 return None
173
174 if method in allowed_methods:
175 return True
176
177 for module_name, *access_path in allowed_external:
178 if _has_original_dunder_external(value, module_name, access_path, method_name):
179 return True
180
181 return False
182
183
@undoc
@dataclass
class SelectivePolicy(EvaluationPolicy):
    """Policy which trusts only allow-listed types with unmodified dunders.

    Types are allow-listed either directly (sets of classes) or by dotted
    import path (sets of tuples), so that external modules such as pandas
    do not need to be imported just to populate the allow-list.
    """

    allowed_getitem: Set[InstancesHaveGetItem] = field(default_factory=set)
    allowed_getitem_external: Set[Tuple[str, ...]] = field(default_factory=set)

    allowed_getattr: Set[MayHaveGetattr] = field(default_factory=set)
    allowed_getattr_external: Set[Tuple[str, ...]] = field(default_factory=set)

    allowed_operations: Set = field(default_factory=set)
    allowed_operations_external: Set[Tuple[str, ...]] = field(default_factory=set)

    # maps operator dunder name -> trusted implementations (lazily built)
    _operation_methods_cache: Dict[str, Set[Callable]] = field(
        default_factory=dict, init=False
    )

    def can_get_attr(self, value, attr):
        """Allow attribute access when both attribute dunders are unmodified
        and the attribute is not an overridden property."""
        # `__getattribute__` always exists (inherited from `object`),
        # so this effectively asks whether it was overridden
        has_original_attribute = _has_original_dunder(
            value,
            allowed_types=self.allowed_getattr,
            allowed_methods=self._getattribute_methods,
            allowed_external=self.allowed_getattr_external,
            method_name="__getattribute__",
        )
        has_original_attr = _has_original_dunder(
            value,
            allowed_types=self.allowed_getattr,
            allowed_methods=self._getattr_methods,
            allowed_external=self.allowed_getattr_external,
            method_name="__getattr__",
        )

        accept = False

        # Many objects do not have `__getattr__`, this is fine.
        if has_original_attr is None and has_original_attribute:
            accept = True
        else:
            # Accept objects without modifications to `__getattr__` and `__getattribute__`
            accept = has_original_attr and has_original_attribute

        if accept:
            # We still need to check for overridden properties.

            value_class = type(value)
            if not hasattr(value_class, attr):
                # plain instance attribute, not a class-level descriptor
                return True

            class_attr_val = getattr(value_class, attr)
            is_property = isinstance(class_attr_val, property)

            if not is_property:
                return True

            # Properties in allowed types are ok (although we do not include any
            # properties in our default allow list currently).
            if type(value) in self.allowed_getattr:
                return True  # pragma: no cover

            # Properties in subclasses of allowed types may be ok if not changed
            for module_name, *access_path in self.allowed_getattr_external:
                try:
                    external_class = _get_external(module_name, access_path)
                    external_class_attr_val = getattr(external_class, attr)
                except (KeyError, AttributeError):
                    return False  # pragma: no cover
                # NOTE(review): this returns on the first external entry, so
                # only one allow-listed external path is ever compared here -
                # confirm this is intended when more entries are added.
                return class_attr_val == external_class_attr_val

        return False

    def can_get_item(self, value, item):
        """Allow accessing `__getitem__` of allow-listed instances if it was not modified."""
        return _has_original_dunder(
            value,
            allowed_types=self.allowed_getitem,
            allowed_methods=self._getitem_methods,
            allowed_external=self.allowed_getitem_external,
            method_name="__getitem__",
        )

    def can_operate(self, dunders: Tuple[str, ...], a, b=None):
        # allowed only when *every* listed dunder on *every* operand
        # is the original (un-overridden) one
        objects = [a]
        if b is not None:
            objects.append(b)
        return all(
            [
                _has_original_dunder(
                    obj,
                    allowed_types=self.allowed_operations,
                    allowed_methods=self._operator_dunder_methods(dunder),
                    allowed_external=self.allowed_operations_external,
                    method_name=dunder,
                )
                for dunder in dunders
                for obj in objects
            ]
        )

    def _operator_dunder_methods(self, dunder: str) -> Set[Callable]:
        # per-dunder cache of trusted operator implementations
        if dunder not in self._operation_methods_cache:
            self._operation_methods_cache[dunder] = self._safe_get_methods(
                self.allowed_operations, dunder
            )
        return self._operation_methods_cache[dunder]

    @cached_property
    def _getitem_methods(self) -> Set[Callable]:
        return self._safe_get_methods(self.allowed_getitem, "__getitem__")

    @cached_property
    def _getattr_methods(self) -> Set[Callable]:
        return self._safe_get_methods(self.allowed_getattr, "__getattr__")

    @cached_property
    def _getattribute_methods(self) -> Set[Callable]:
        return self._safe_get_methods(self.allowed_getattr, "__getattribute__")

    def _safe_get_methods(self, classes, name) -> Set[Callable]:
        """Collect the ``name`` method from each class, skipping classes
        that do not define it."""
        return {
            method
            for class_ in classes
            for method in [getattr(class_, name, None)]
            if method
        }
308
309
# placed in BUILTIN_GETITEM so that named-tuple methods can be trusted
class _DummyNamedTuple(NamedTuple):
    """Used internally to retrieve methods of named tuple instance."""
312
313
class EvaluationContext(NamedTuple):
    """Namespaces and policy selection consumed by :func:`guarded_eval`."""

    #: Local namespace
    locals: dict
    #: Global namespace
    globals: dict
    #: Evaluation policy identifier
    evaluation: Literal[
        "forbidden", "minimal", "limited", "unsafe", "dangerous"
    ] = "forbidden"
    #: Whether the evaluation of code takes place inside of a subscript.
    #: Useful for evaluating ``:-1, 'col'`` in ``df[:-1, 'col']``.
    in_subscript: bool = False
326
327
class _IdentitySubscript:
    """Returns the key itself when item is requested via subscript."""

    def __getitem__(self, key):
        # `key` may be a slice/tuple built by the evaluated code
        return key
333
334
# singleton used by `guarded_eval` to evaluate subscript syntax (e.g. `:`)
# without performing a real `__getitem__`
IDENTITY_SUBSCRIPT = _IdentitySubscript()
# sentinel name injected into the local namespace for the trick above
SUBSCRIPT_MARKER = "__SUBSCRIPT_SENTINEL__"
337
338
class GuardRejection(Exception):
    """Exception raised when guard rejects evaluation attempt."""
343
344
def guarded_eval(code: str, context: EvaluationContext):
    """Evaluate provided code in the evaluation context.

    If evaluation policy given by context is set to ``forbidden``
    no evaluation will be performed; if it is set to ``dangerous``
    standard :func:`eval` will be used; finally, for any other policy
    :func:`eval_node` will be called on parsed AST.
    """
    if context.evaluation == "forbidden":
        raise GuardRejection("Forbidden mode")

    # `ast.literal_eval` is not used here as it does not implement
    # getitem at all; for example it fails on simple `[0][1]`

    if context.in_subscript:
        # Syntactic sugar for ellipsis (:) is only available in subscripts,
        # so the parser is tricked into seeing a subscript by wrapping the
        # code in one on an identity object; the sentinel name lets the
        # actual `__getitem__` be a no-op.
        if not code:
            return tuple()
        evaluation_locals = dict(context.locals)
        evaluation_locals[SUBSCRIPT_MARKER] = IDENTITY_SUBSCRIPT
        code = f"{SUBSCRIPT_MARKER}[{code}]"
        context = context._replace(locals=evaluation_locals)

    if context.evaluation == "dangerous":
        return eval(code, context.globals, context.locals)

    return eval_node(ast.parse(code, mode="eval"), context)
379
380
# Maps AST operator node types to the dunder name(s) that implement them.
# Where several dunders are listed, the policy checks all of them but the
# first one is the one invoked by `eval_node`.
BINARY_OP_DUNDERS: Dict[Type[ast.operator], Tuple[str, ...]] = {
    ast.Add: ("__add__",),
    ast.Sub: ("__sub__",),
    ast.Mult: ("__mul__",),
    ast.Div: ("__truediv__",),
    ast.FloorDiv: ("__floordiv__",),
    ast.Mod: ("__mod__",),
    ast.Pow: ("__pow__",),
    ast.LShift: ("__lshift__",),
    ast.RShift: ("__rshift__",),
    ast.BitOr: ("__or__",),
    ast.BitXor: ("__xor__",),
    ast.BitAnd: ("__and__",),
    ast.MatMult: ("__matmul__",),
}

# For comparisons the reflected dunder is listed too, since either side
# may end up implementing the comparison.
COMP_OP_DUNDERS: Dict[Type[ast.cmpop], Tuple[str, ...]] = {
    ast.Eq: ("__eq__",),
    ast.NotEq: ("__ne__", "__eq__"),
    ast.Lt: ("__lt__", "__gt__"),
    ast.LtE: ("__le__", "__ge__"),
    ast.Gt: ("__gt__", "__lt__"),
    ast.GtE: ("__ge__", "__le__"),
    ast.In: ("__contains__",),
    # Note: ast.Is, ast.IsNot, ast.NotIn are handled specially
}

UNARY_OP_DUNDERS: Dict[Type[ast.unaryop], Tuple[str, ...]] = {
    ast.USub: ("__neg__",),
    ast.UAdd: ("__pos__",),
    # we have to check both __inv__ and __invert__!
    ast.Invert: ("__invert__", "__inv__"),
    ast.Not: ("__not__",),
}
415
416
417 def _find_dunder(node_op, dunders) -> Union[Tuple[str, ...], None]:
418 dunder = None
419 for op, candidate_dunder in dunders.items():
420 if isinstance(node_op, op):
421 dunder = candidate_dunder
422 return dunder
423
424
def eval_node(node: Union[ast.AST, None], context: EvaluationContext):
    """Evaluate AST node in provided context.

    Applies evaluation restrictions defined in the context. Currently does not support evaluation of functions with keyword arguments.

    Does not evaluate actions that always have side effects:

    - class definitions (``class sth: ...``)
    - function definitions (``def sth: ...``)
    - variable assignments (``x = 1``)
    - augmented assignments (``x += 1``)
    - deletions (``del x``)

    Does not evaluate operations which do not return values:

    - assertions (``assert x``)
    - pass (``pass``)
    - imports (``import x``)
    - control flow:

      - conditionals (``if x:``) except for ternary IfExp (``a if x else b``)
      - loops (``for`` and ``while``)
      - exception handling

    The purpose of this function is to guard against unwanted side-effects;
    it does not give guarantees on protection from malicious code execution.
    """
    policy = EVALUATION_POLICIES[context.evaluation]
    if node is None:
        return None
    if isinstance(node, ast.Expression):
        return eval_node(node.body, context)
    if isinstance(node, ast.BinOp):
        # binary operator: both operands must pass the policy check;
        # the first dunder in the tuple is the one actually invoked
        left = eval_node(node.left, context)
        right = eval_node(node.right, context)
        dunders = _find_dunder(node.op, BINARY_OP_DUNDERS)
        if dunders:
            if policy.can_operate(dunders, left, right):
                return getattr(left, dunders[0])(right)
            else:
                raise GuardRejection(
                    f"Operation (`{dunders}`) for",
                    type(left),
                    f"not allowed in {context.evaluation} mode",
                )
    if isinstance(node, ast.Compare):
        # chained comparisons (`a < b < c`) evaluate pairwise, carrying
        # the right operand over as the next left operand
        left = eval_node(node.left, context)
        all_true = True
        negate = False
        for op, right in zip(node.ops, node.comparators):
            right = eval_node(right, context)
            dunder = None
            dunders = _find_dunder(op, COMP_OP_DUNDERS)
            if not dunders:
                # special cases: `not in` reuses `in` negated; `is`/`is not`
                # map to `operator.is_` and bypass the dunder policy check
                if isinstance(op, ast.NotIn):
                    dunders = COMP_OP_DUNDERS[ast.In]
                    negate = True
                if isinstance(op, ast.Is):
                    dunder = "is_"
                if isinstance(op, ast.IsNot):
                    dunder = "is_"
                    negate = True
            if not dunder and dunders:
                dunder = dunders[0]
            if dunder:
                # `in` is implemented by the right operand's `__contains__`
                a, b = (right, left) if dunder == "__contains__" else (left, right)
                if dunder == "is_" or dunders and policy.can_operate(dunders, a, b):
                    result = getattr(operator, dunder)(a, b)
                    if negate:
                        result = not result
                    if not result:
                        all_true = False
                    left = right
                else:
                    raise GuardRejection(
                        f"Comparison (`{dunder}`) for",
                        type(left),
                        f"not allowed in {context.evaluation} mode",
                    )
            else:
                raise ValueError(
                    f"Comparison `{dunder}` not supported"
                )  # pragma: no cover
        return all_true
    if isinstance(node, ast.Constant):
        return node.value
    if isinstance(node, ast.Index):
        # deprecated since Python 3.9
        return eval_node(node.value, context)  # pragma: no cover
    # literal containers: elements are each evaluated under the policy
    if isinstance(node, ast.Tuple):
        return tuple(eval_node(e, context) for e in node.elts)
    if isinstance(node, ast.List):
        return [eval_node(e, context) for e in node.elts]
    if isinstance(node, ast.Set):
        return {eval_node(e, context) for e in node.elts}
    if isinstance(node, ast.Dict):
        return dict(
            zip(
                [eval_node(k, context) for k in node.keys],
                [eval_node(v, context) for v in node.values],
            )
        )
    if isinstance(node, ast.Slice):
        return slice(
            eval_node(node.lower, context),
            eval_node(node.upper, context),
            eval_node(node.step, context),
        )
    if isinstance(node, ast.ExtSlice):
        # deprecated since Python 3.9
        return tuple([eval_node(dim, context) for dim in node.dims])  # pragma: no cover
    if isinstance(node, ast.UnaryOp):
        value = eval_node(node.operand, context)
        dunders = _find_dunder(node.op, UNARY_OP_DUNDERS)
        if dunders:
            if policy.can_operate(dunders, value):
                return getattr(value, dunders[0])()
            else:
                raise GuardRejection(
                    f"Operation (`{dunders}`) for",
                    type(value),
                    f"not allowed in {context.evaluation} mode",
                )
    if isinstance(node, ast.Subscript):
        value = eval_node(node.value, context)
        slice_ = eval_node(node.slice, context)
        if policy.can_get_item(value, slice_):
            return value[slice_]
        raise GuardRejection(
            "Subscript access (`__getitem__`) for",
            type(value),  # not joined to avoid calling `repr`
            f" not allowed in {context.evaluation} mode",
        )
    if isinstance(node, ast.Name):
        # name resolution order mirrors normal Python: locals, globals,
        # then builtins - each gated by its own policy flag
        if policy.allow_locals_access and node.id in context.locals:
            return context.locals[node.id]
        if policy.allow_globals_access and node.id in context.globals:
            return context.globals[node.id]
        if policy.allow_builtins_access and hasattr(builtins, node.id):
            # note: do not use __builtins__, it is implementation detail of cPython
            return getattr(builtins, node.id)
        if not policy.allow_globals_access and not policy.allow_locals_access:
            raise GuardRejection(
                f"Namespace access not allowed in {context.evaluation} mode"
            )
        else:
            raise NameError(f"{node.id} not found in locals, globals, nor builtins")
    if isinstance(node, ast.Attribute):
        value = eval_node(node.value, context)
        if policy.can_get_attr(value, node.attr):
            return getattr(value, node.attr)
        raise GuardRejection(
            "Attribute access (`__getattr__`) for",
            type(value),  # not joined to avoid calling `repr`
            f"not allowed in {context.evaluation} mode",
        )
    if isinstance(node, ast.IfExp):
        # ternary expression; only the taken branch is evaluated
        test = eval_node(node.test, context)
        if test:
            return eval_node(node.body, context)
        else:
            return eval_node(node.orelse, context)
    if isinstance(node, ast.Call):
        # keyword arguments are not supported, hence `not node.keywords`
        func = eval_node(node.func, context)
        if policy.can_call(func) and not node.keywords:
            args = [eval_node(arg, context) for arg in node.args]
            return func(*args)
        raise GuardRejection(
            "Call for",
            func,  # not joined to avoid calling `repr`
            f"not allowed in {context.evaluation} mode",
        )
    raise ValueError("Unhandled node", ast.dump(node))
598
599
# Dotted import paths of external classes with trusted `__getitem__`;
# stored as paths so the modules are not imported just to build the set.
SUPPORTED_EXTERNAL_GETITEM = {
    ("pandas", "core", "indexing", "_iLocIndexer"),
    ("pandas", "core", "indexing", "_LocIndexer"),
    ("pandas", "DataFrame"),
    ("pandas", "Series"),
    ("numpy", "ndarray"),
    ("numpy", "void"),
}
608
609
# Builtin and stdlib container types whose `__getitem__` is trusted.
BUILTIN_GETITEM: Set[InstancesHaveGetItem] = {
    dict,
    str,
    bytes,
    list,
    tuple,
    collections.defaultdict,
    collections.deque,
    collections.OrderedDict,
    collections.ChainMap,
    collections.UserDict,
    collections.UserList,
    collections.UserString,
    _DummyNamedTuple,
    _IdentitySubscript,
}
626
627
628 def _list_methods(cls, source=None):
629 """For use on immutable objects or with methods returning a copy"""
630 return [getattr(cls, k) for k in (source if source else dir(cls))]
631
632
# method names which do not mutate their owner (or return copies)
dict_non_mutating_methods = ("copy", "keys", "values", "items")
list_non_mutating_methods = ("copy", "index", "count")
# methods shared with `frozenset` cannot mutate the set
set_non_mutating_methods = set(dir(set)) & set(dir(frozenset))


# `dict_keys` is not exposed as a builtin; recover it from an instance
dict_keys: Type[collections.abc.KeysView] = type({}.keys())
# type of C-implemented methods such as `list.copy`
method_descriptor: Any = type(list.copy)

NUMERICS = {int, float, complex}
642
# Calls allowed in `limited` mode: constructors of builtin containers and
# numbers plus their non-mutating methods (see the tuples defined above).
ALLOWED_CALLS = {
    bytes,
    *_list_methods(bytes),
    dict,
    *_list_methods(dict, dict_non_mutating_methods),
    dict_keys.isdisjoint,
    list,
    *_list_methods(list, list_non_mutating_methods),
    set,
    *_list_methods(set, set_non_mutating_methods),
    frozenset,
    *_list_methods(frozenset),
    range,
    str,
    *_list_methods(str),
    tuple,
    *_list_methods(tuple),
    *NUMERICS,
    *[method for numeric_cls in NUMERICS for method in _list_methods(numeric_cls)],
    collections.deque,
    *_list_methods(collections.deque, list_non_mutating_methods),
    collections.defaultdict,
    *_list_methods(collections.defaultdict, dict_non_mutating_methods),
    collections.OrderedDict,
    *_list_methods(collections.OrderedDict, dict_non_mutating_methods),
    collections.UserDict,
    *_list_methods(collections.UserDict, dict_non_mutating_methods),
    collections.UserList,
    *_list_methods(collections.UserList, list_non_mutating_methods),
    collections.UserString,
    *_list_methods(collections.UserString, dir(str)),
    collections.Counter,
    *_list_methods(collections.Counter, dict_non_mutating_methods),
    collections.Counter.elements,
    collections.Counter.most_common,
}
679
# Types whose attribute access dunders are trusted.
BUILTIN_GETATTR: Set[MayHaveGetattr] = {
    *BUILTIN_GETITEM,
    set,
    frozenset,
    object,
    type,  # `type` handles a lot of generic cases, e.g. numbers as in `int.real`.
    *NUMERICS,
    dict_keys,
    method_descriptor,
}


# operators are trusted on the same builtin types as attribute access
BUILTIN_OPERATIONS = {*BUILTIN_GETATTR}
693
# Registry of named policies. `forbidden` has no entry because
# `guarded_eval` raises before a policy lookup, and `dangerous`
# short-circuits to plain `eval`.
EVALUATION_POLICIES = {
    "minimal": EvaluationPolicy(
        allow_builtins_access=True,
        allow_locals_access=False,
        allow_globals_access=False,
        allow_item_access=False,
        allow_attr_access=False,
        allowed_calls=set(),
        allow_any_calls=False,
        allow_all_operations=False,
    ),
    "limited": SelectivePolicy(
        allowed_getitem=BUILTIN_GETITEM,
        allowed_getitem_external=SUPPORTED_EXTERNAL_GETITEM,
        allowed_getattr=BUILTIN_GETATTR,
        allowed_getattr_external={
            # pandas Series/Frame implements custom `__getattr__`
            ("pandas", "DataFrame"),
            ("pandas", "Series"),
        },
        allowed_operations=BUILTIN_OPERATIONS,
        allow_builtins_access=True,
        allow_locals_access=True,
        allow_globals_access=True,
        allowed_calls=ALLOWED_CALLS,
    ),
    "unsafe": EvaluationPolicy(
        allow_builtins_access=True,
        allow_locals_access=True,
        allow_globals_access=True,
        allow_attr_access=True,
        allow_item_access=True,
        allow_any_calls=True,
        allow_all_operations=True,
    ),
}
730
731
# Public API of the module.
# NOTE(review): `_unbind_method` is underscore-prefixed yet exported -
# presumably for the test suite; confirm before removing.
__all__ = [
    "guarded_eval",
    "eval_node",
    "GuardRejection",
    "EvaluationContext",
    "_unbind_method",
]
This diff has been collapsed as it changes many lines, (570 lines changed) Show them Hide them
@@ -0,0 +1,570 b''
1 from contextlib import contextmanager
2 from typing import NamedTuple
3 from functools import partial
4 from IPython.core.guarded_eval import (
5 EvaluationContext,
6 GuardRejection,
7 guarded_eval,
8 _unbind_method,
9 )
10 from IPython.testing import decorators as dec
11 import pytest
12
13
def create_context(evaluation: str, **kwargs):
    """Build an ``EvaluationContext`` whose local namespace is ``kwargs``."""
    local_ns = kwargs
    return EvaluationContext(locals=local_ns, globals={}, evaluation=evaluation)
16
17
# shorthand factories for contexts at each evaluation level
forbidden = partial(create_context, "forbidden")
minimal = partial(create_context, "minimal")
limited = partial(create_context, "limited")
unsafe = partial(create_context, "unsafe")
dangerous = partial(create_context, "dangerous")

# groups ordered from least to most permissive, for parametrized tests
LIMITED_OR_HIGHER = [limited, unsafe, dangerous]
MINIMAL_OR_HIGHER = [minimal, *LIMITED_OR_HIGHER]
26
27
@contextmanager
def module_not_installed(module: str):
    """Temporarily remove ``module`` from ``sys.modules``.

    On exit the previous state is restored: if the module had been
    imported, the original module object is put back; if it had not,
    the entry is removed again. The previous implementation assigned
    ``sys.modules[module] = None`` in that case, which blocks any
    subsequent import of the module.
    """
    import sys

    try:
        to_restore = sys.modules[module]
        del sys.modules[module]
    except KeyError:
        to_restore = None
    try:
        yield
    finally:
        if to_restore is None:
            # do not leave a None entry behind
            sys.modules.pop(module, None)
        else:
            sys.modules[module] = to_restore
41
42
def test_external_not_installed():
    """
    Because attribute check requires checking if object is not of allowed
    external type, this tests logic for absence of external module.
    """

    class Custom:
        def __init__(self):
            self.test = 1

        # custom `__getattr__` must cause the guard to reject access
        def __getattr__(self, key):
            return key

    with module_not_installed("pandas"):
        context = limited(x=Custom())
        with pytest.raises(GuardRejection):
            guarded_eval("x.test", context)
60
61
@dec.skip_without("pandas")
def test_external_changed_api(monkeypatch):
    """Check that the execution rejects if external API changed paths"""
    import pandas as pd

    series = pd.Series([1], index=["a"])

    # removing `pd.Series` invalidates the allow-listed access path
    with monkeypatch.context() as m:
        m.delattr(pd, "Series")
        context = limited(data=series)
        with pytest.raises(GuardRejection):
            guarded_eval("data.iloc[0]", context)
74
75
@dec.skip_without("pandas")
def test_pandas_series_iloc():
    """`iloc` access on an unmodified Series is allowed in limited mode."""
    import pandas as pd

    series = pd.Series([1], index=["a"])
    context = limited(data=series)
    assert guarded_eval("data.iloc[0]", context) == 1
83
84
def test_rejects_custom_properties():
    """Properties defined on non-allow-listed classes are rejected."""

    class BadProperty:
        @property
        def iloc(self):
            return [None]

    series = BadProperty()
    context = limited(data=series)

    with pytest.raises(GuardRejection):
        guarded_eval("data.iloc[0]", context)
96
97
@dec.skip_without("pandas")
def test_accepts_non_overriden_properties():
    """Unmodified properties inherited from allow-listed classes are accepted."""
    import pandas as pd

    class GoodProperty(pd.Series):
        pass

    series = GoodProperty([1], index=["a"])
    context = limited(data=series)

    assert guarded_eval("data.iloc[0]", context) == 1
109
110
@dec.skip_without("pandas")
def test_pandas_series():
    """Series `__getitem__` works and missing keys still raise KeyError."""
    import pandas as pd

    context = limited(data=pd.Series([1], index=["a"]))
    assert guarded_eval('data["a"]', context) == 1
    with pytest.raises(KeyError):
        guarded_eval('data["c"]', context)
119
120
@dec.skip_without("pandas")
def test_pandas_bad_series():
    """Series subclasses overriding `__getitem__`/`__getattr__` are rejected."""
    import pandas as pd

    class BadItemSeries(pd.Series):
        def __getitem__(self, key):
            return "CUSTOM_ITEM"

    class BadAttrSeries(pd.Series):
        def __getattr__(self, key):
            return "CUSTOM_ATTR"

    bad_series = BadItemSeries([1], index=["a"])
    context = limited(data=bad_series)

    with pytest.raises(GuardRejection):
        guarded_eval('data["a"]', context)
    with pytest.raises(GuardRejection):
        guarded_eval('data["c"]', context)

    # note: here result is a bit unexpected because
    # pandas `__getattr__` calls `__getitem__`;
    # FIXME - special case to handle it?
    assert guarded_eval("data.a", context) == "CUSTOM_ITEM"

    # unsafe mode does not guard the overridden dunder
    context = unsafe(data=bad_series)
    assert guarded_eval('data["a"]', context) == "CUSTOM_ITEM"

    bad_attr_series = BadAttrSeries([1], index=["a"])
    context = limited(data=bad_attr_series)
    assert guarded_eval('data["a"]', context) == 1
    with pytest.raises(GuardRejection):
        guarded_eval("data.a", context)
154
155
@dec.skip_without("pandas")
def test_pandas_dataframe_loc():
    """`loc` with a slice-and-column subscript works in limited mode."""
    import pandas as pd
    from pandas.testing import assert_series_equal

    data = pd.DataFrame([{"a": 1}])
    context = limited(data=data)
    assert_series_equal(guarded_eval('data.loc[:, "a"]', context), data["a"])
164
165
def test_named_tuple():
    """Named tuples are allowed unless they override ``__getitem__``."""

    class GoodNamedTuple(NamedTuple):
        a: str
        pass

    class BadNamedTuple(NamedTuple):
        a: str

        def __getitem__(self, key):
            return None

    good = GoodNamedTuple(a="x")
    bad = BadNamedTuple(a="x")

    context = limited(data=good)
    assert guarded_eval("data[0]", context) == "x"

    context = limited(data=bad)
    with pytest.raises(GuardRejection):
        guarded_eval("data[0]", context)
186
187
def test_dict():
    """Dict item access (including tuple keys) and non-mutating method
    lookup work in limited mode."""
    ctx = limited(data={"a": 1, "b": {"x": 2}, ("x", "y"): 3})
    assert guarded_eval('data["a"]', ctx) == 1
    assert guarded_eval('data["b"]', ctx) == {"x": 2}
    assert guarded_eval('data["b"]["x"]', ctx) == 2
    assert guarded_eval('data["x", "y"]', ctx) == 3

    assert guarded_eval("data.keys", ctx)
196
197
def test_set():
    """Attribute lookup of a non-mutating set method is allowed."""
    ctx = limited(data={"a", "b"})
    assert guarded_eval("data.difference", ctx)
201
202
def test_list():
    """List item access and non-mutating method lookup are allowed."""
    ctx = limited(data=[1, 2, 3])
    assert guarded_eval("data[1]", ctx) == 2
    assert guarded_eval("data.copy", ctx)
207
208
def test_dict_literal():
    """Dict literals evaluate in limited mode."""
    ctx = limited()
    assert guarded_eval("{}", ctx) == {}
    assert guarded_eval('{"a": 1}', ctx) == {"a": 1}
213
214
def test_list_literal():
    """List literals evaluate in limited mode."""
    ctx = limited()
    assert guarded_eval("[]", ctx) == []
    assert guarded_eval('[1, "a"]', ctx) == [1, "a"]
219
220
def test_set_literal():
    """Set literals and the `set()` constructor evaluate in limited mode."""
    ctx = limited()
    assert guarded_eval("set()", ctx) == set()
    assert guarded_eval('{"a"}', ctx) == {"a"}
225
226
def test_evaluates_if_expression():
    """Ternary `a if cond else b` expressions evaluate in limited mode."""
    ctx = limited()
    assert guarded_eval("2 if True else 3", ctx) == 2
    assert guarded_eval("4 if False else 5", ctx) == 5
231
232
def test_object():
    """Dunder attribute lookup on a plain object is allowed."""
    instance = object()
    ctx = limited(obj=instance)
    assert guarded_eval("obj.__dir__", ctx) == instance.__dir__
237
238
@pytest.mark.parametrize(
    "code,expected",
    [
        ["int.numerator", int.numerator],
        ["float.is_integer", float.is_integer],
        ["complex.real", complex.real],
    ],
)
def test_number_attributes(code, expected):
    """Attribute access on numeric builtin types works in limited mode."""
    assert guarded_eval(code, limited()) == expected
249
250
def test_method_descriptor():
    """Attributes of C-level method descriptors are accessible."""
    ctx = limited()
    assert guarded_eval("list.copy.__name__", ctx) == "copy"
254
255
@pytest.mark.parametrize(
    "data,good,bad,expected",
    [
        [[1, 2, 3], "data.index(2)", "data.append(4)", 1],
        [{"a": 1}, "data.keys().isdisjoint({})", "data.update()", True],
    ],
)
def test_evaluates_calls(data, good, bad, expected):
    """Non-mutating method calls pass; mutating methods are rejected."""
    context = limited(data=data)
    assert guarded_eval(good, context) == expected

    with pytest.raises(GuardRejection):
        guarded_eval(bad, context)
269
270
@pytest.mark.parametrize(
    "code,expected",
    [
        ["(1\n+\n1)", 2],
        ["list(range(10))[-1:]", [9]],
        ["list(range(20))[3:-2:3]", [3, 6, 9, 12, 15]],
    ],
)
@pytest.mark.parametrize("context", LIMITED_OR_HIGHER)
def test_evaluates_complex_cases(code, expected, context):
    """Multi-line expressions and slicing work at limited level and above."""
    assert guarded_eval(code, context()) == expected
282
283
@pytest.mark.parametrize(
    "code,expected",
    [
        ["1", 1],
        ["1.0", 1.0],
        ["0xdeedbeef", 0xDEEDBEEF],
        ["True", True],
        ["None", None],
        ["{}", {}],
        ["[]", []],
    ],
)
@pytest.mark.parametrize("context", MINIMAL_OR_HIGHER)
def test_evaluates_literals(code, expected, context):
    """Plain literals evaluate at minimal level and above."""
    assert guarded_eval(code, context()) == expected
299
300
@pytest.mark.parametrize(
    "code,expected",
    [
        ["-5", -5],
        ["+5", +5],
        ["~5", -6],
    ],
)
@pytest.mark.parametrize("context", LIMITED_OR_HIGHER)
def test_evaluates_unary_operations(code, expected, context):
    """Unary operators on builtin ints work at limited level and above."""
    assert guarded_eval(code, context()) == expected
312
313
@pytest.mark.parametrize(
    "code,expected",
    [
        ["1 + 1", 2],
        ["3 - 1", 2],
        ["2 * 3", 6],
        ["5 // 2", 2],
        ["5 / 2", 2.5],
        ["5**2", 25],
        ["2 >> 1", 1],
        ["2 << 1", 4],
        ["1 | 2", 3],
        ["1 & 1", 1],
        ["1 & 2", 0],
    ],
)
@pytest.mark.parametrize("context", LIMITED_OR_HIGHER)
def test_evaluates_binary_operations(code, expected, context):
    """Binary operators on builtin ints work at limited level and above."""
    assert guarded_eval(code, context()) == expected
333
334
@pytest.mark.parametrize(
    "code,expected",
    [
        ["2 > 1", True],
        ["2 < 1", False],
        ["2 <= 1", False],
        ["2 <= 2", True],
        ["1 >= 2", False],
        ["2 >= 2", True],
        ["2 == 2", True],
        ["1 == 2", False],
        ["1 != 2", True],
        ["1 != 1", False],
        ["1 < 4 < 3", False],
        ["(1 < 4) < 3", True],
        ["4 > 3 > 2 > 1", True],
        ["4 > 3 > 2 > 9", False],
        ["1 < 2 < 3 < 4", True],
        ["9 < 2 < 3 < 4", False],
        ["1 < 2 > 1 > 0 > -1 < 1", True],
        ["1 in [1] in [[1]]", True],
        ["1 in [1] in [[2]]", False],
        ["1 in [1]", True],
        ["0 in [1]", False],
        ["1 not in [1]", False],
        ["0 not in [1]", True],
        ["True is True", True],
        ["False is False", True],
        ["True is False", False],
        ["True is not True", False],
        ["False is not True", True],
    ],
)
@pytest.mark.parametrize("context", LIMITED_OR_HIGHER)
def test_evaluates_comparisons(code, expected, context):
    """Simple and chained comparisons, `in` and `is` variants all evaluate."""
    assert guarded_eval(code, context()) == expected
371
372
373 def test_guards_comparisons():
374 class GoodEq(int):
375 pass
376
377 class BadEq(int):
378 def __eq__(self, other):
379 assert False
380
381 context = limited(bad=BadEq(1), good=GoodEq(1))
382
383 with pytest.raises(GuardRejection):
384 guarded_eval("bad == 1", context)
385
386 with pytest.raises(GuardRejection):
387 guarded_eval("bad != 1", context)
388
389 with pytest.raises(GuardRejection):
390 guarded_eval("1 == bad", context)
391
392 with pytest.raises(GuardRejection):
393 guarded_eval("1 != bad", context)
394
395 assert guarded_eval("good == 1", context) is True
396 assert guarded_eval("good != 1", context) is False
397 assert guarded_eval("1 == good", context) is True
398 assert guarded_eval("1 != good", context) is False
399
400
401 def test_guards_unary_operations():
402 class GoodOp(int):
403 pass
404
405 class BadOpInv(int):
406 def __inv__(self, other):
407 assert False
408
409 class BadOpInverse(int):
410 def __inv__(self, other):
411 assert False
412
413 context = limited(good=GoodOp(1), bad1=BadOpInv(1), bad2=BadOpInverse(1))
414
415 with pytest.raises(GuardRejection):
416 guarded_eval("~bad1", context)
417
418 with pytest.raises(GuardRejection):
419 guarded_eval("~bad2", context)
420
421
422 def test_guards_binary_operations():
423 class GoodOp(int):
424 pass
425
426 class BadOp(int):
427 def __add__(self, other):
428 assert False
429
430 context = limited(good=GoodOp(1), bad=BadOp(1))
431
432 with pytest.raises(GuardRejection):
433 guarded_eval("1 + bad", context)
434
435 with pytest.raises(GuardRejection):
436 guarded_eval("bad + 1", context)
437
438 assert guarded_eval("good + 1", context) == 2
439 assert guarded_eval("1 + good", context) == 2
440
441
442 def test_guards_attributes():
443 class GoodAttr(float):
444 pass
445
446 class BadAttr1(float):
447 def __getattr__(self, key):
448 assert False
449
450 class BadAttr2(float):
451 def __getattribute__(self, key):
452 assert False
453
454 context = limited(good=GoodAttr(0.5), bad1=BadAttr1(0.5), bad2=BadAttr2(0.5))
455
456 with pytest.raises(GuardRejection):
457 guarded_eval("bad1.as_integer_ratio", context)
458
459 with pytest.raises(GuardRejection):
460 guarded_eval("bad2.as_integer_ratio", context)
461
462 assert guarded_eval("good.as_integer_ratio()", context) == (1, 2)
463
464
465 @pytest.mark.parametrize("context", MINIMAL_OR_HIGHER)
466 def test_access_builtins(context):
467 assert guarded_eval("round", context()) == round
468
469
470 def test_access_builtins_fails():
471 context = limited()
472 with pytest.raises(NameError):
473 guarded_eval("this_is_not_builtin", context)
474
475
476 def test_rejects_forbidden():
477 context = forbidden()
478 with pytest.raises(GuardRejection):
479 guarded_eval("1", context)
480
481
482 def test_guards_locals_and_globals():
483 context = EvaluationContext(
484 locals={"local_a": "a"}, globals={"global_b": "b"}, evaluation="minimal"
485 )
486
487 with pytest.raises(GuardRejection):
488 guarded_eval("local_a", context)
489
490 with pytest.raises(GuardRejection):
491 guarded_eval("global_b", context)
492
493
494 def test_access_locals_and_globals():
495 context = EvaluationContext(
496 locals={"local_a": "a"}, globals={"global_b": "b"}, evaluation="limited"
497 )
498 assert guarded_eval("local_a", context) == "a"
499 assert guarded_eval("global_b", context) == "b"
500
501
502 @pytest.mark.parametrize(
503 "code",
504 ["def func(): pass", "class C: pass", "x = 1", "x += 1", "del x", "import ast"],
505 )
506 @pytest.mark.parametrize("context", [minimal(), limited(), unsafe()])
507 def test_rejects_side_effect_syntax(code, context):
508 with pytest.raises(SyntaxError):
509 guarded_eval(code, context)
510
511
512 def test_subscript():
513 context = EvaluationContext(
514 locals={}, globals={}, evaluation="limited", in_subscript=True
515 )
516 empty_slice = slice(None, None, None)
517 assert guarded_eval("", context) == tuple()
518 assert guarded_eval(":", context) == empty_slice
519 assert guarded_eval("1:2:3", context) == slice(1, 2, 3)
520 assert guarded_eval(':, "a"', context) == (empty_slice, "a")
521
522
523 def test_unbind_method():
524 class X(list):
525 def index(self, k):
526 return "CUSTOM"
527
528 x = X()
529 assert _unbind_method(x.index) is X.index
530 assert _unbind_method([].index) is list.index
531 assert _unbind_method(list.index) is None
532
533
534 def test_assumption_instance_attr_do_not_matter():
535 """This is semi-specified in Python documentation.
536
537 However, since the specification says 'not guaranted
538 to work' rather than 'is forbidden to work', future
539 versions could invalidate this assumptions. This test
540 is meant to catch such a change if it ever comes true.
541 """
542
543 class T:
544 def __getitem__(self, k):
545 return "a"
546
547 def __getattr__(self, k):
548 return "a"
549
550 def f(self):
551 return "b"
552
553 t = T()
554 t.__getitem__ = f
555 t.__getattr__ = f
556 assert t[1] == "a"
557 assert t[1] == "a"
558
559
560 def test_assumption_named_tuples_share_getitem():
561 """Check assumption on named tuples sharing __getitem__"""
562 from typing import NamedTuple
563
564 class A(NamedTuple):
565 pass
566
567 class B(NamedTuple):
568 pass
569
570 assert A.__getitem__ == B.__getitem__
@@ -0,0 +1,26 b''
1 from typing import List
2
3 import pytest
4 import pygments.lexers
5 import pygments.lexer
6
7 from IPython.lib.lexers import IPythonConsoleLexer, IPythonLexer, IPython3Lexer
8
9 #: the human-readable names of the IPython lexers with ``entry_points``
10 EXPECTED_LEXER_NAMES = [
11 cls.name for cls in [IPythonConsoleLexer, IPythonLexer, IPython3Lexer]
12 ]
13
14
15 @pytest.fixture
16 def all_pygments_lexer_names() -> List[str]:
17 """Get all lexer names registered in pygments."""
18 return {l[0] for l in pygments.lexers.get_all_lexers()}
19
20
21 @pytest.mark.parametrize("expected_lexer", EXPECTED_LEXER_NAMES)
22 def test_pygments_entry_points(
23 expected_lexer: str, all_pygments_lexer_names: List[str]
24 ) -> None:
25 """Check whether the ``entry_points`` for ``pygments.lexers`` are correct."""
26 assert expected_lexer in all_pygments_lexer_names
@@ -15,7 +15,7 b' jobs:'
15 runs-on: ubuntu-latest
15 runs-on: ubuntu-latest
16 strategy:
16 strategy:
17 matrix:
17 matrix:
18 python-version: [3.8]
18 python-version: ["3.x"]
19
19
20 steps:
20 steps:
21 - uses: actions/checkout@v3
21 - uses: actions/checkout@v3
@@ -31,6 +31,8 b' jobs:'
31 run: |
31 run: |
32 mypy -p IPython.terminal
32 mypy -p IPython.terminal
33 mypy -p IPython.core.magics
33 mypy -p IPython.core.magics
34 mypy -p IPython.core.guarded_eval
35 mypy -p IPython.core.completer
34 - name: Lint with pyflakes
36 - name: Lint with pyflakes
35 run: |
37 run: |
36 flake8 IPython/core/magics/script.py
38 flake8 IPython/core/magics/script.py
@@ -19,7 +19,7 b' jobs:'
19 fail-fast: false
19 fail-fast: false
20 matrix:
20 matrix:
21 os: [ubuntu-latest, windows-latest]
21 os: [ubuntu-latest, windows-latest]
22 python-version: ["3.8", "3.9", "3.10"]
22 python-version: ["3.8", "3.9", "3.10", "3.11"]
23 deps: [test_extra]
23 deps: [test_extra]
24 # Test all on ubuntu, test ends on macos
24 # Test all on ubuntu, test ends on macos
25 include:
25 include:
@@ -27,15 +27,15 b' jobs:'
27 python-version: "3.8"
27 python-version: "3.8"
28 deps: test_extra
28 deps: test_extra
29 - os: macos-latest
29 - os: macos-latest
30 python-version: "3.10"
30 python-version: "3.11"
31 deps: test_extra
31 deps: test_extra
32 # Tests minimal dependencies set
32 # Tests minimal dependencies set
33 - os: ubuntu-latest
33 - os: ubuntu-latest
34 python-version: "3.10"
34 python-version: "3.11"
35 deps: test
35 deps: test
36 # Tests latest development Python version
36 # Tests latest development Python version
37 - os: ubuntu-latest
37 - os: ubuntu-latest
38 python-version: "3.11-dev"
38 python-version: "3.12-dev"
39 deps: test
39 deps: test
40 # Installing optional dependencies stuff takes ages on PyPy
40 # Installing optional dependencies stuff takes ages on PyPy
41 - os: ubuntu-latest
41 - os: ubuntu-latest
@@ -1,3 +1,4 b''
1 # PYTHON_ARGCOMPLETE_OK
1 """
2 """
2 IPython: tools for interactive and parallel computing in Python.
3 IPython: tools for interactive and parallel computing in Python.
3
4
@@ -1,3 +1,4 b''
1 # PYTHON_ARGCOMPLETE_OK
1 # encoding: utf-8
2 # encoding: utf-8
2 """Terminal-based IPython entry point.
3 """Terminal-based IPython entry point.
3 """
4 """
@@ -123,9 +123,8 b' class ProfileAwareConfigLoader(PyFileConfigLoader):'
123 return super(ProfileAwareConfigLoader, self).load_subconfig(fname, path=path)
123 return super(ProfileAwareConfigLoader, self).load_subconfig(fname, path=path)
124
124
125 class BaseIPythonApplication(Application):
125 class BaseIPythonApplication(Application):
126
126 name = "ipython"
127 name = u'ipython'
127 description = "IPython: an enhanced interactive Python shell."
128 description = Unicode(u'IPython: an enhanced interactive Python shell.')
129 version = Unicode(release.version)
128 version = Unicode(release.version)
130
129
131 aliases = base_aliases
130 aliases = base_aliases
This diff has been collapsed as it changes many lines, (699 lines changed) Show them Hide them
@@ -50,7 +50,7 b' Backward latex completion'
50
50
51 It is sometime challenging to know how to type a character, if you are using
51 It is sometime challenging to know how to type a character, if you are using
52 IPython, or any compatible frontend you can prepend backslash to the character
52 IPython, or any compatible frontend you can prepend backslash to the character
53 and press ``<tab>`` to expand it to its latex form.
53 and press :kbd:`Tab` to expand it to its latex form.
54
54
55 .. code::
55 .. code::
56
56
@@ -59,7 +59,8 b' and press ``<tab>`` to expand it to its latex form.'
59
59
60
60
61 Both forward and backward completions can be deactivated by setting the
61 Both forward and backward completions can be deactivated by setting the
62 ``Completer.backslash_combining_completions`` option to ``False``.
62 :std:configtrait:`Completer.backslash_combining_completions` option to
63 ``False``.
63
64
64
65
65 Experimental
66 Experimental
@@ -95,7 +96,7 b' having to execute any code:'
95 ... myvar[1].bi<tab>
96 ... myvar[1].bi<tab>
96
97
97 Tab completion will be able to infer that ``myvar[1]`` is a real number without
98 Tab completion will be able to infer that ``myvar[1]`` is a real number without
98 executing any code unlike the previously available ``IPCompleter.greedy``
99 executing almost any code unlike the deprecated :any:`IPCompleter.greedy`
99 option.
100 option.
100
101
101 Be sure to update :any:`jedi` to the latest stable version or to try the
102 Be sure to update :any:`jedi` to the latest stable version or to try the
@@ -166,7 +167,7 b' this can be achieved by adding a list of identifiers of matchers which'
166 should not be suppressed to ``MatcherResult`` under ``do_not_suppress`` key.
167 should not be suppressed to ``MatcherResult`` under ``do_not_suppress`` key.
167
168
168 The suppression behaviour can is user-configurable via
169 The suppression behaviour can is user-configurable via
169 :any:`IPCompleter.suppress_competing_matchers`.
170 :std:configtrait:`IPCompleter.suppress_competing_matchers`.
170 """
171 """
171
172
172
173
@@ -178,6 +179,7 b' The suppression behaviour can is user-configurable via'
178
179
179 from __future__ import annotations
180 from __future__ import annotations
180 import builtins as builtin_mod
181 import builtins as builtin_mod
182 import enum
181 import glob
183 import glob
182 import inspect
184 import inspect
183 import itertools
185 import itertools
@@ -186,14 +188,16 b' import os'
186 import re
188 import re
187 import string
189 import string
188 import sys
190 import sys
191 import tokenize
189 import time
192 import time
190 import unicodedata
193 import unicodedata
191 import uuid
194 import uuid
192 import warnings
195 import warnings
196 from ast import literal_eval
197 from collections import defaultdict
193 from contextlib import contextmanager
198 from contextlib import contextmanager
194 from dataclasses import dataclass
199 from dataclasses import dataclass
195 from functools import cached_property, partial
200 from functools import cached_property, partial
196 from importlib import import_module
197 from types import SimpleNamespace
201 from types import SimpleNamespace
198 from typing import (
202 from typing import (
199 Iterable,
203 Iterable,
@@ -204,14 +208,15 b' from typing import ('
204 Any,
208 Any,
205 Sequence,
209 Sequence,
206 Dict,
210 Dict,
207 NamedTuple,
208 Pattern,
209 Optional,
211 Optional,
210 TYPE_CHECKING,
212 TYPE_CHECKING,
211 Set,
213 Set,
214 Sized,
215 TypeVar,
212 Literal,
216 Literal,
213 )
217 )
214
218
219 from IPython.core.guarded_eval import guarded_eval, EvaluationContext
215 from IPython.core.error import TryNext
220 from IPython.core.error import TryNext
216 from IPython.core.inputtransformer2 import ESC_MAGIC
221 from IPython.core.inputtransformer2 import ESC_MAGIC
217 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
222 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
@@ -231,7 +236,6 b' from traitlets import ('
231 Unicode,
236 Unicode,
232 Dict as DictTrait,
237 Dict as DictTrait,
233 Union as UnionTrait,
238 Union as UnionTrait,
234 default,
235 observe,
239 observe,
236 )
240 )
237 from traitlets.config.configurable import Configurable
241 from traitlets.config.configurable import Configurable
@@ -252,12 +256,13 b' except ImportError:'
252 JEDI_INSTALLED = False
256 JEDI_INSTALLED = False
253
257
254
258
255 if TYPE_CHECKING or GENERATING_DOCUMENTATION:
259 if TYPE_CHECKING or GENERATING_DOCUMENTATION and sys.version_info >= (3, 11):
256 from typing import cast
260 from typing import cast
257 from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias
261 from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias, TypeGuard
258 else:
262 else:
263 from typing import Generic
259
264
260 def cast(obj, type_):
265 def cast(type_, obj):
261 """Workaround for `TypeError: MatcherAPIv2() takes no arguments`"""
266 """Workaround for `TypeError: MatcherAPIv2() takes no arguments`"""
262 return obj
267 return obj
263
268
@@ -266,6 +271,7 b' else:'
266 TypedDict = Dict # by extension of `NotRequired` requires 3.11 too
271 TypedDict = Dict # by extension of `NotRequired` requires 3.11 too
267 Protocol = object # requires Python >=3.8
272 Protocol = object # requires Python >=3.8
268 TypeAlias = Any # requires Python >=3.10
273 TypeAlias = Any # requires Python >=3.10
274 TypeGuard = Generic # requires Python >=3.10
269 if GENERATING_DOCUMENTATION:
275 if GENERATING_DOCUMENTATION:
270 from typing import TypedDict
276 from typing import TypedDict
271
277
@@ -279,7 +285,7 b' if GENERATING_DOCUMENTATION:'
279 # write this). With below range we cover them all, with a density of ~67%
285 # write this). With below range we cover them all, with a density of ~67%
280 # biggest next gap we consider only adds up about 1% density and there are 600
286 # biggest next gap we consider only adds up about 1% density and there are 600
281 # gaps that would need hard coding.
287 # gaps that would need hard coding.
282 _UNICODE_RANGES = [(32, 0x3134b), (0xe0001, 0xe01f0)]
288 _UNICODE_RANGES = [(32, 0x323B0), (0xE0001, 0xE01F0)]
283
289
284 # Public API
290 # Public API
285 __all__ = ["Completer", "IPCompleter"]
291 __all__ = ["Completer", "IPCompleter"]
@@ -296,6 +302,9 b' MATCHES_LIMIT = 500'
296 # Completion type reported when no type can be inferred.
302 # Completion type reported when no type can be inferred.
297 _UNKNOWN_TYPE = "<unknown>"
303 _UNKNOWN_TYPE = "<unknown>"
298
304
305 # sentinel value to signal lack of a match
306 not_found = object()
307
299 class ProvisionalCompleterWarning(FutureWarning):
308 class ProvisionalCompleterWarning(FutureWarning):
300 """
309 """
301 Exception raise by an experimental feature in this module.
310 Exception raise by an experimental feature in this module.
@@ -466,8 +475,9 b' class _FakeJediCompletion:'
466 self.complete = name
475 self.complete = name
467 self.type = 'crashed'
476 self.type = 'crashed'
468 self.name_with_symbols = name
477 self.name_with_symbols = name
469 self.signature = ''
478 self.signature = ""
470 self._origin = 'fake'
479 self._origin = "fake"
480 self.text = "crashed"
471
481
472 def __repr__(self):
482 def __repr__(self):
473 return '<Fake completion object jedi has crashed>'
483 return '<Fake completion object jedi has crashed>'
@@ -503,11 +513,23 b' class Completion:'
503
513
504 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
514 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
505
515
506 def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin='', signature='') -> None:
516 def __init__(
507 warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
517 self,
518 start: int,
519 end: int,
520 text: str,
521 *,
522 type: Optional[str] = None,
523 _origin="",
524 signature="",
525 ) -> None:
526 warnings.warn(
527 "``Completion`` is a provisional API (as of IPython 6.0). "
508 "It may change without warnings. "
528 "It may change without warnings. "
509 "Use in corresponding context manager.",
529 "Use in corresponding context manager.",
510 category=ProvisionalCompleterWarning, stacklevel=2)
530 category=ProvisionalCompleterWarning,
531 stacklevel=2,
532 )
511
533
512 self.start = start
534 self.start = start
513 self.end = end
535 self.end = end
@@ -520,7 +542,7 b' class Completion:'
520 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
542 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
521 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
543 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
522
544
523 def __eq__(self, other)->Bool:
545 def __eq__(self, other) -> bool:
524 """
546 """
525 Equality and hash do not hash the type (as some completer may not be
547 Equality and hash do not hash the type (as some completer may not be
526 able to infer the type), but are use to (partially) de-duplicate
548 able to infer the type), but are use to (partially) de-duplicate
@@ -554,7 +576,7 b' class SimpleCompletion:'
554
576
555 __slots__ = ["text", "type"]
577 __slots__ = ["text", "type"]
556
578
557 def __init__(self, text: str, *, type: str = None):
579 def __init__(self, text: str, *, type: Optional[str] = None):
558 self.text = text
580 self.text = text
559 self.type = type
581 self.type = type
560
582
@@ -588,14 +610,18 b' class SimpleMatcherResult(_MatcherResultBase, TypedDict):'
588 # in order to get __orig_bases__ for documentation
610 # in order to get __orig_bases__ for documentation
589
611
590 #: List of candidate completions
612 #: List of candidate completions
591 completions: Sequence[SimpleCompletion]
613 completions: Sequence[SimpleCompletion] | Iterator[SimpleCompletion]
592
614
593
615
594 class _JediMatcherResult(_MatcherResultBase):
616 class _JediMatcherResult(_MatcherResultBase):
595 """Matching result returned by Jedi (will be processed differently)"""
617 """Matching result returned by Jedi (will be processed differently)"""
596
618
597 #: list of candidate completions
619 #: list of candidate completions
598 completions: Iterable[_JediCompletionLike]
620 completions: Iterator[_JediCompletionLike]
621
622
623 AnyMatcherCompletion = Union[_JediCompletionLike, SimpleCompletion]
624 AnyCompletion = TypeVar("AnyCompletion", AnyMatcherCompletion, Completion)
599
625
600
626
601 @dataclass
627 @dataclass
@@ -642,16 +668,21 b' MatcherResult = Union[SimpleMatcherResult, _JediMatcherResult]'
642
668
643
669
644 class _MatcherAPIv1Base(Protocol):
670 class _MatcherAPIv1Base(Protocol):
645 def __call__(self, text: str) -> list[str]:
671 def __call__(self, text: str) -> List[str]:
646 """Call signature."""
672 """Call signature."""
673 ...
674
675 #: Used to construct the default matcher identifier
676 __qualname__: str
647
677
648
678
649 class _MatcherAPIv1Total(_MatcherAPIv1Base, Protocol):
679 class _MatcherAPIv1Total(_MatcherAPIv1Base, Protocol):
650 #: API version
680 #: API version
651 matcher_api_version: Optional[Literal[1]]
681 matcher_api_version: Optional[Literal[1]]
652
682
653 def __call__(self, text: str) -> list[str]:
683 def __call__(self, text: str) -> List[str]:
654 """Call signature."""
684 """Call signature."""
685 ...
655
686
656
687
657 #: Protocol describing Matcher API v1.
688 #: Protocol describing Matcher API v1.
@@ -666,26 +697,61 b' class MatcherAPIv2(Protocol):'
666
697
667 def __call__(self, context: CompletionContext) -> MatcherResult:
698 def __call__(self, context: CompletionContext) -> MatcherResult:
668 """Call signature."""
699 """Call signature."""
700 ...
701
702 #: Used to construct the default matcher identifier
703 __qualname__: str
669
704
670
705
671 Matcher: TypeAlias = Union[MatcherAPIv1, MatcherAPIv2]
706 Matcher: TypeAlias = Union[MatcherAPIv1, MatcherAPIv2]
672
707
673
708
709 def _is_matcher_v1(matcher: Matcher) -> TypeGuard[MatcherAPIv1]:
710 api_version = _get_matcher_api_version(matcher)
711 return api_version == 1
712
713
714 def _is_matcher_v2(matcher: Matcher) -> TypeGuard[MatcherAPIv2]:
715 api_version = _get_matcher_api_version(matcher)
716 return api_version == 2
717
718
719 def _is_sizable(value: Any) -> TypeGuard[Sized]:
720 """Determines whether objects is sizable"""
721 return hasattr(value, "__len__")
722
723
724 def _is_iterator(value: Any) -> TypeGuard[Iterator]:
725 """Determines whether objects is sizable"""
726 return hasattr(value, "__next__")
727
728
674 def has_any_completions(result: MatcherResult) -> bool:
729 def has_any_completions(result: MatcherResult) -> bool:
675 """Check if any result includes any completions."""
730 """Check if any result includes any completions."""
676 if hasattr(result["completions"], "__len__"):
731 completions = result["completions"]
677 return len(result["completions"]) != 0
732 if _is_sizable(completions):
733 return len(completions) != 0
734 if _is_iterator(completions):
678 try:
735 try:
679 old_iterator = result["completions"]
736 old_iterator = completions
680 first = next(old_iterator)
737 first = next(old_iterator)
681 result["completions"] = itertools.chain([first], old_iterator)
738 result["completions"] = cast(
739 Iterator[SimpleCompletion],
740 itertools.chain([first], old_iterator),
741 )
682 return True
742 return True
683 except StopIteration:
743 except StopIteration:
684 return False
744 return False
745 raise ValueError(
746 "Completions returned by matcher need to be an Iterator or a Sizable"
747 )
685
748
686
749
687 def completion_matcher(
750 def completion_matcher(
688 *, priority: float = None, identifier: str = None, api_version: int = 1
751 *,
752 priority: Optional[float] = None,
753 identifier: Optional[str] = None,
754 api_version: int = 1,
689 ):
755 ):
690 """Adds attributes describing the matcher.
756 """Adds attributes describing the matcher.
691
757
@@ -708,14 +774,14 b' def completion_matcher('
708 """
774 """
709
775
710 def wrapper(func: Matcher):
776 def wrapper(func: Matcher):
711 func.matcher_priority = priority or 0
777 func.matcher_priority = priority or 0 # type: ignore
712 func.matcher_identifier = identifier or func.__qualname__
778 func.matcher_identifier = identifier or func.__qualname__ # type: ignore
713 func.matcher_api_version = api_version
779 func.matcher_api_version = api_version # type: ignore
714 if TYPE_CHECKING:
780 if TYPE_CHECKING:
715 if api_version == 1:
781 if api_version == 1:
716 func = cast(func, MatcherAPIv1)
782 func = cast(MatcherAPIv1, func)
717 elif api_version == 2:
783 elif api_version == 2:
718 func = cast(func, MatcherAPIv2)
784 func = cast(MatcherAPIv2, func)
719 return func
785 return func
720
786
721 return wrapper
787 return wrapper
@@ -902,12 +968,44 b' class CompletionSplitter(object):'
902
968
903 class Completer(Configurable):
969 class Completer(Configurable):
904
970
905 greedy = Bool(False,
971 greedy = Bool(
906 help="""Activate greedy completion
972 False,
907 PENDING DEPRECATION. this is now mostly taken care of with Jedi.
973 help="""Activate greedy completion.
974
975 .. deprecated:: 8.8
976 Use :std:configtrait:`Completer.evaluation` and :std:configtrait:`Completer.auto_close_dict_keys` instead.
908
977
909 This will enable completion on elements of lists, results of function calls, etc.,
978 When enabled in IPython 8.8 or newer, changes configuration as follows:
910 but can be unsafe because the code is actually evaluated on TAB.
979
980 - ``Completer.evaluation = 'unsafe'``
981 - ``Completer.auto_close_dict_keys = True``
982 """,
983 ).tag(config=True)
984
985 evaluation = Enum(
986 ("forbidden", "minimal", "limited", "unsafe", "dangerous"),
987 default_value="limited",
988 help="""Policy for code evaluation under completion.
989
990 Successive options allow to enable more eager evaluation for better
991 completion suggestions, including for nested dictionaries, nested lists,
992 or even results of function calls.
993 Setting ``unsafe`` or higher can lead to evaluation of arbitrary user
994 code on :kbd:`Tab` with potentially unwanted or dangerous side effects.
995
996 Allowed values are:
997
998 - ``forbidden``: no evaluation of code is permitted,
999 - ``minimal``: evaluation of literals and access to built-in namespace;
1000 no item/attribute evaluationm no access to locals/globals,
1001 no evaluation of any operations or comparisons.
1002 - ``limited``: access to all namespaces, evaluation of hard-coded methods
1003 (for example: :any:`dict.keys`, :any:`object.__getattr__`,
1004 :any:`object.__getitem__`) on allow-listed objects (for example:
1005 :any:`dict`, :any:`list`, :any:`tuple`, ``pandas.Series``),
1006 - ``unsafe``: evaluation of all methods and function calls but not of
1007 syntax with side-effects like `del x`,
1008 - ``dangerous``: completely arbitrary evaluation.
911 """,
1009 """,
912 ).tag(config=True)
1010 ).tag(config=True)
913
1011
@@ -931,6 +1029,18 b' class Completer(Configurable):'
931 "Includes completion of latex commands, unicode names, and expanding "
1029 "Includes completion of latex commands, unicode names, and expanding "
932 "unicode characters back to latex commands.").tag(config=True)
1030 "unicode characters back to latex commands.").tag(config=True)
933
1031
1032 auto_close_dict_keys = Bool(
1033 False,
1034 help="""
1035 Enable auto-closing dictionary keys.
1036
1037 When enabled string keys will be suffixed with a final quote
1038 (matching the opening quote), tuple keys will also receive a
1039 separating comma if needed, and keys which are final will
1040 receive a closing bracket (``]``).
1041 """,
1042 ).tag(config=True)
1043
934 def __init__(self, namespace=None, global_namespace=None, **kwargs):
1044 def __init__(self, namespace=None, global_namespace=None, **kwargs):
935 """Create a new completer for the command line.
1045 """Create a new completer for the command line.
936
1046
@@ -1029,26 +1139,14 b' class Completer(Configurable):'
1029 with a __getattr__ hook is evaluated.
1139 with a __getattr__ hook is evaluated.
1030
1140
1031 """
1141 """
1032
1033 # Another option, seems to work great. Catches things like ''.<tab>
1034 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
1035
1036 if m:
1037 expr, attr = m.group(1, 3)
1038 elif self.greedy:
1039 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
1142 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
1040 if not m2:
1143 if not m2:
1041 return []
1144 return []
1042 expr, attr = m2.group(1,2)
1145 expr, attr = m2.group(1, 2)
1043 else:
1044 return []
1045
1146
1046 try:
1147 obj = self._evaluate_expr(expr)
1047 obj = eval(expr, self.namespace)
1148
1048 except:
1149 if obj is not_found:
1049 try:
1050 obj = eval(expr, self.global_namespace)
1051 except:
1052 return []
1150 return []
1053
1151
1054 if self.limit_to__all__ and hasattr(obj, '__all__'):
1152 if self.limit_to__all__ and hasattr(obj, '__all__'):
@@ -1068,8 +1166,31 b' class Completer(Configurable):'
1068 pass
1166 pass
1069 # Build match list to return
1167 # Build match list to return
1070 n = len(attr)
1168 n = len(attr)
1071 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
1169 return ["%s.%s" % (expr, w) for w in words if w[:n] == attr]
1072
1170
1171 def _evaluate_expr(self, expr):
1172 obj = not_found
1173 done = False
1174 while not done and expr:
1175 try:
1176 obj = guarded_eval(
1177 expr,
1178 EvaluationContext(
1179 globals=self.global_namespace,
1180 locals=self.namespace,
1181 evaluation=self.evaluation,
1182 ),
1183 )
1184 done = True
1185 except Exception as e:
1186 if self.debug:
1187 print("Evaluation exception", e)
1188 # trim the expression to remove any invalid prefix
1189 # e.g. user starts `(d[`, so we get `expr = '(d'`,
1190 # where parenthesis is not closed.
1191 # TODO: make this faster by reusing parts of the computation?
1192 expr = expr[1:]
1193 return obj
1073
1194
1074 def get__all__entries(obj):
1195 def get__all__entries(obj):
1075 """returns the strings in the __all__ attribute"""
1196 """returns the strings in the __all__ attribute"""
@@ -1081,8 +1202,82 b' def get__all__entries(obj):'
1081 return [w for w in words if isinstance(w, str)]
1202 return [w for w in words if isinstance(w, str)]
1082
1203
1083
1204
1084 def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes]]]], prefix: str, delims: str,
1205 class _DictKeyState(enum.Flag):
1085 extra_prefix: Optional[Tuple[str, bytes]]=None) -> Tuple[str, int, List[str]]:
1206 """Represent state of the key match in context of other possible matches.
1207
1208 - given `d1 = {'a': 1}` completion on `d1['<tab>` will yield `{'a': END_OF_ITEM}` as there is no tuple.
1209 - given `d2 = {('a', 'b'): 1}`: `d2['a', '<tab>` will yield `{'b': END_OF_TUPLE}` as there is no tuple members to add beyond `'b'`.
1210 - given `d3 = {('a', 'b'): 1}`: `d3['<tab>` will yield `{'a': IN_TUPLE}` as `'a'` can be added.
1211 - given `d4 = {'a': 1, ('a', 'b'): 2}`: `d4['<tab>` will yield `{'a': END_OF_ITEM & END_OF_TUPLE}`
1212 """
1213
1214 BASELINE = 0
1215 END_OF_ITEM = enum.auto()
1216 END_OF_TUPLE = enum.auto()
1217 IN_TUPLE = enum.auto()
1218
1219
1220 def _parse_tokens(c):
1221 """Parse tokens even if there is an error."""
1222 tokens = []
1223 token_generator = tokenize.generate_tokens(iter(c.splitlines()).__next__)
1224 while True:
1225 try:
1226 tokens.append(next(token_generator))
1227 except tokenize.TokenError:
1228 return tokens
1229 except StopIteration:
1230 return tokens
1231
1232
1233 def _match_number_in_dict_key_prefix(prefix: str) -> Union[str, None]:
1234 """Match any valid Python numeric literal in a prefix of dictionary keys.
1235
1236 References:
1237 - https://docs.python.org/3/reference/lexical_analysis.html#numeric-literals
1238 - https://docs.python.org/3/library/tokenize.html
1239 """
1240 if prefix[-1].isspace():
1241 # if user typed a space we do not have anything to complete
1242 # even if there was a valid number token before
1243 return None
1244 tokens = _parse_tokens(prefix)
1245 rev_tokens = reversed(tokens)
1246 skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
1247 number = None
1248 for token in rev_tokens:
1249 if token.type in skip_over:
1250 continue
1251 if number is None:
1252 if token.type == tokenize.NUMBER:
1253 number = token.string
1254 continue
1255 else:
1256 # we did not match a number
1257 return None
1258 if token.type == tokenize.OP:
1259 if token.string == ",":
1260 break
1261 if token.string in {"+", "-"}:
1262 number = token.string + number
1263 else:
1264 return None
1265 return number
1266
1267
1268 _INT_FORMATS = {
1269 "0b": bin,
1270 "0o": oct,
1271 "0x": hex,
1272 }
1273
1274
1275 def match_dict_keys(
1276 keys: List[Union[str, bytes, Tuple[Union[str, bytes], ...]]],
1277 prefix: str,
1278 delims: str,
1279 extra_prefix: Optional[Tuple[Union[str, bytes], ...]] = None,
1280 ) -> Tuple[str, int, Dict[str, _DictKeyState]]:
1086 """Used by dict_key_matches, matching the prefix to a list of keys
1281 """Used by dict_key_matches, matching the prefix to a list of keys
1087
1282
1088 Parameters
1283 Parameters
@@ -1102,47 +1297,89 b' def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes]]]], pre'
1102 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
1297 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
1103 ``quote`` being the quote that need to be used to close current string.
1298 ``quote`` being the quote that need to be used to close current string.
1104 ``token_start`` the position where the replacement should start occurring,
1299 ``token_start`` the position where the replacement should start occurring,
1105 ``matches`` a list of replacement/completion
1300 ``matches`` a dictionary of replacement/completion keys on keys and values
1106
1301 indicating whether the state.
1107 """
1302 """
1108 prefix_tuple = extra_prefix if extra_prefix else ()
1303 prefix_tuple = extra_prefix if extra_prefix else ()
1109 Nprefix = len(prefix_tuple)
1304
1305 prefix_tuple_size = sum(
1306 [
1307 # for pandas, do not count slices as taking space
1308 not isinstance(k, slice)
1309 for k in prefix_tuple
1310 ]
1311 )
1312 text_serializable_types = (str, bytes, int, float, slice)
1313
1110 def filter_prefix_tuple(key):
1314 def filter_prefix_tuple(key):
1111 # Reject too short keys
1315 # Reject too short keys
1112 if len(key) <= Nprefix:
1316 if len(key) <= prefix_tuple_size:
1113 return False
1317 return False
1114 # Reject keys with non str/bytes in it
1318 # Reject keys which cannot be serialised to text
1115 for k in key:
1319 for k in key:
1116 if not isinstance(k, (str, bytes)):
1320 if not isinstance(k, text_serializable_types):
1117 return False
1321 return False
1118 # Reject keys that do not match the prefix
1322 # Reject keys that do not match the prefix
1119 for k, pt in zip(key, prefix_tuple):
1323 for k, pt in zip(key, prefix_tuple):
1120 if k != pt:
1324 if k != pt and not isinstance(pt, slice):
1121 return False
1325 return False
1122 # All checks passed!
1326 # All checks passed!
1123 return True
1327 return True
1124
1328
1125 filtered_keys:List[Union[str,bytes]] = []
1329 filtered_key_is_final: Dict[
1126 def _add_to_filtered_keys(key):
1330 Union[str, bytes, int, float], _DictKeyState
1127 if isinstance(key, (str, bytes)):
1331 ] = defaultdict(lambda: _DictKeyState.BASELINE)
1128 filtered_keys.append(key)
1129
1332
1130 for k in keys:
1333 for k in keys:
1334 # If at least one of the matches is not final, mark as undetermined.
1335 # This can happen with `d = {111: 'b', (111, 222): 'a'}` where
1336 # `111` appears final on first match but is not final on the second.
1337
1131 if isinstance(k, tuple):
1338 if isinstance(k, tuple):
1132 if filter_prefix_tuple(k):
1339 if filter_prefix_tuple(k):
1133 _add_to_filtered_keys(k[Nprefix])
1340 key_fragment = k[prefix_tuple_size]
1341 filtered_key_is_final[key_fragment] |= (
1342 _DictKeyState.END_OF_TUPLE
1343 if len(k) == prefix_tuple_size + 1
1344 else _DictKeyState.IN_TUPLE
1345 )
1346 elif prefix_tuple_size > 0:
1347 # we are completing a tuple but this key is not a tuple,
1348 # so we should ignore it
1349 pass
1134 else:
1350 else:
1135 _add_to_filtered_keys(k)
1351 if isinstance(k, text_serializable_types):
1352 filtered_key_is_final[k] |= _DictKeyState.END_OF_ITEM
1353
1354 filtered_keys = filtered_key_is_final.keys()
1136
1355
1137 if not prefix:
1356 if not prefix:
1138 return '', 0, [repr(k) for k in filtered_keys]
1357 return "", 0, {repr(k): v for k, v in filtered_key_is_final.items()}
1139 quote_match = re.search('["\']', prefix)
1358
1140 assert quote_match is not None # silence mypy
1359 quote_match = re.search("(?:\"|')", prefix)
1360 is_user_prefix_numeric = False
1361
1362 if quote_match:
1141 quote = quote_match.group()
1363 quote = quote_match.group()
1364 valid_prefix = prefix + quote
1142 try:
1365 try:
1143 prefix_str = eval(prefix + quote, {})
1366 prefix_str = literal_eval(valid_prefix)
1144 except Exception:
1367 except Exception:
1145 return '', 0, []
1368 return "", 0, {}
1369 else:
1370 # If it does not look like a string, let's assume
1371 # we are dealing with a number or variable.
1372 number_match = _match_number_in_dict_key_prefix(prefix)
1373
1374 # We do not want the key matcher to suggest variable names so we yield:
1375 if number_match is None:
1376 # The alternative would be to assume that user forgort the quote
1377 # and if the substring matches, suggest adding it at the start.
1378 return "", 0, {}
1379
1380 prefix_str = number_match
1381 is_user_prefix_numeric = True
1382 quote = ""
1146
1383
1147 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
1384 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
1148 token_match = re.search(pattern, prefix, re.UNICODE)
1385 token_match = re.search(pattern, prefix, re.UNICODE)
@@ -1150,17 +1387,36 b' def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes]]]], pre'
1150 token_start = token_match.start()
1387 token_start = token_match.start()
1151 token_prefix = token_match.group()
1388 token_prefix = token_match.group()
1152
1389
1153 matched:List[str] = []
1390 matched: Dict[str, _DictKeyState] = {}
1391
1392 str_key: Union[str, bytes]
1393
1154 for key in filtered_keys:
1394 for key in filtered_keys:
1395 if isinstance(key, (int, float)):
1396 # User typed a number but this key is not a number.
1397 if not is_user_prefix_numeric:
1398 continue
1399 str_key = str(key)
1400 if isinstance(key, int):
1401 int_base = prefix_str[:2].lower()
1402 # if user typed integer using binary/oct/hex notation:
1403 if int_base in _INT_FORMATS:
1404 int_format = _INT_FORMATS[int_base]
1405 str_key = int_format(key)
1406 else:
1407 # User typed a string but this key is a number.
1408 if is_user_prefix_numeric:
1409 continue
1410 str_key = key
1155 try:
1411 try:
1156 if not key.startswith(prefix_str):
1412 if not str_key.startswith(prefix_str):
1157 continue
1413 continue
1158 except (AttributeError, TypeError, UnicodeError):
1414 except (AttributeError, TypeError, UnicodeError) as e:
1159 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
1415 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
1160 continue
1416 continue
1161
1417
1162 # reformat remainder of key to begin with prefix
1418 # reformat remainder of key to begin with prefix
1163 rem = key[len(prefix_str):]
1419 rem = str_key[len(prefix_str) :]
1164 # force repr wrapped in '
1420 # force repr wrapped in '
1165 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
1421 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
1166 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
1422 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
@@ -1171,7 +1427,9 b' def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes]]]], pre'
1171 rem_repr = rem_repr.replace('"', '\\"')
1427 rem_repr = rem_repr.replace('"', '\\"')
1172
1428
1173 # then reinsert prefix from start of token
1429 # then reinsert prefix from start of token
1174 matched.append('%s%s' % (token_prefix, rem_repr))
1430 match = "%s%s" % (token_prefix, rem_repr)
1431
1432 matched[match] = filtered_key_is_final[key]
1175 return quote, token_start, matched
1433 return quote, token_start, matched
1176
1434
1177
1435
@@ -1237,11 +1495,14 b' def position_to_cursor(text:str, offset:int)->Tuple[int, int]:'
1237 return line, col
1495 return line, col
1238
1496
1239
1497
1240 def _safe_isinstance(obj, module, class_name):
1498 def _safe_isinstance(obj, module, class_name, *attrs):
1241 """Checks if obj is an instance of module.class_name if loaded
1499 """Checks if obj is an instance of module.class_name if loaded
1242 """
1500 """
1243 return (module in sys.modules and
1501 if module in sys.modules:
1244 isinstance(obj, getattr(import_module(module), class_name)))
1502 m = sys.modules[module]
1503 for attr in [class_name, *attrs]:
1504 m = getattr(m, attr)
1505 return isinstance(obj, m)
1245
1506
1246
1507
1247 @context_matcher()
1508 @context_matcher()
@@ -1394,10 +1655,59 b' def _make_signature(completion)-> str:'
1394 _CompleteResult = Dict[str, MatcherResult]
1655 _CompleteResult = Dict[str, MatcherResult]
1395
1656
1396
1657
1658 DICT_MATCHER_REGEX = re.compile(
1659 r"""(?x)
1660 ( # match dict-referring - or any get item object - expression
1661 .+
1662 )
1663 \[ # open bracket
1664 \s* # and optional whitespace
1665 # Capture any number of serializable objects (e.g. "a", "b", 'c')
1666 # and slices
1667 ((?:(?:
1668 (?: # closed string
1669 [uUbB]? # string prefix (r not handled)
1670 (?:
1671 '(?:[^']|(?<!\\)\\')*'
1672 |
1673 "(?:[^"]|(?<!\\)\\")*"
1674 )
1675 )
1676 |
1677 # capture integers and slices
1678 (?:[-+]?\d+)?(?::(?:[-+]?\d+)?){0,2}
1679 |
1680 # integer in bin/hex/oct notation
1681 0[bBxXoO]_?(?:\w|\d)+
1682 )
1683 \s*,\s*
1684 )*)
1685 ((?:
1686 (?: # unclosed string
1687 [uUbB]? # string prefix (r not handled)
1688 (?:
1689 '(?:[^']|(?<!\\)\\')*
1690 |
1691 "(?:[^"]|(?<!\\)\\")*
1692 )
1693 )
1694 |
1695 # unfinished integer
1696 (?:[-+]?\d+)
1697 |
1698 # integer in bin/hex/oct notation
1699 0[bBxXoO]_?(?:\w|\d)+
1700 )
1701 )?
1702 $
1703 """
1704 )
1705
1706
1397 def _convert_matcher_v1_result_to_v2(
1707 def _convert_matcher_v1_result_to_v2(
1398 matches: Sequence[str],
1708 matches: Sequence[str],
1399 type: str,
1709 type: str,
1400 fragment: str = None,
1710 fragment: Optional[str] = None,
1401 suppress_if_matches: bool = False,
1711 suppress_if_matches: bool = False,
1402 ) -> SimpleMatcherResult:
1712 ) -> SimpleMatcherResult:
1403 """Utility to help with transition"""
1713 """Utility to help with transition"""
@@ -1407,20 +1717,22 b' def _convert_matcher_v1_result_to_v2('
1407 }
1717 }
1408 if fragment is not None:
1718 if fragment is not None:
1409 result["matched_fragment"] = fragment
1719 result["matched_fragment"] = fragment
1410 return result
1720 return cast(SimpleMatcherResult, result)
1411
1721
1412
1722
1413 class IPCompleter(Completer):
1723 class IPCompleter(Completer):
1414 """Extension of the completer class with IPython-specific features"""
1724 """Extension of the completer class with IPython-specific features"""
1415
1725
1416 __dict_key_regexps: Optional[Dict[bool,Pattern]] = None
1417
1418 @observe('greedy')
1726 @observe('greedy')
1419 def _greedy_changed(self, change):
1727 def _greedy_changed(self, change):
1420 """update the splitter and readline delims when greedy is changed"""
1728 """update the splitter and readline delims when greedy is changed"""
1421 if change['new']:
1729 if change["new"]:
1730 self.evaluation = "unsafe"
1731 self.auto_close_dict_keys = True
1422 self.splitter.delims = GREEDY_DELIMS
1732 self.splitter.delims = GREEDY_DELIMS
1423 else:
1733 else:
1734 self.evaluation = "limited"
1735 self.auto_close_dict_keys = False
1424 self.splitter.delims = DELIMS
1736 self.splitter.delims = DELIMS
1425
1737
1426 dict_keys_only = Bool(
1738 dict_keys_only = Bool(
@@ -1607,7 +1919,7 b' class IPCompleter(Completer):'
1607
1919
1608 if not self.backslash_combining_completions:
1920 if not self.backslash_combining_completions:
1609 for matcher in self._backslash_combining_matchers:
1921 for matcher in self._backslash_combining_matchers:
1610 self.disable_matchers.append(matcher.matcher_identifier)
1922 self.disable_matchers.append(_get_matcher_id(matcher))
1611
1923
1612 if not self.merge_completions:
1924 if not self.merge_completions:
1613 self.suppress_competing_matchers = True
1925 self.suppress_competing_matchers = True
@@ -1897,7 +2209,7 b' class IPCompleter(Completer):'
1897
2209
1898 def _jedi_matches(
2210 def _jedi_matches(
1899 self, cursor_column: int, cursor_line: int, text: str
2211 self, cursor_column: int, cursor_line: int, text: str
1900 ) -> Iterable[_JediCompletionLike]:
2212 ) -> Iterator[_JediCompletionLike]:
1901 """
2213 """
1902 Return a list of :any:`jedi.api.Completion`s object from a ``text`` and
2214 Return a list of :any:`jedi.api.Completion`s object from a ``text`` and
1903 cursor position.
2215 cursor position.
@@ -1963,15 +2275,23 b' class IPCompleter(Completer):'
1963 print("Error detecting if completing a non-finished string :", e, '|')
2275 print("Error detecting if completing a non-finished string :", e, '|')
1964
2276
1965 if not try_jedi:
2277 if not try_jedi:
1966 return []
2278 return iter([])
1967 try:
2279 try:
1968 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
2280 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
1969 except Exception as e:
2281 except Exception as e:
1970 if self.debug:
2282 if self.debug:
1971 return [_FakeJediCompletion('Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\ns"""' % (e))]
2283 return iter(
2284 [
2285 _FakeJediCompletion(
2286 'Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\ns"""'
2287 % (e)
2288 )
2289 ]
2290 )
1972 else:
2291 else:
1973 return []
2292 return iter([])
1974
2293
2294 @completion_matcher(api_version=1)
1975 def python_matches(self, text: str) -> Iterable[str]:
2295 def python_matches(self, text: str) -> Iterable[str]:
1976 """Match attributes or global python names"""
2296 """Match attributes or global python names"""
1977 if "." in text:
2297 if "." in text:
@@ -2149,12 +2469,16 b' class IPCompleter(Completer):'
2149 return method()
2469 return method()
2150
2470
2151 # Special case some common in-memory dict-like types
2471 # Special case some common in-memory dict-like types
2152 if isinstance(obj, dict) or\
2472 if isinstance(obj, dict) or _safe_isinstance(obj, "pandas", "DataFrame"):
2153 _safe_isinstance(obj, 'pandas', 'DataFrame'):
2154 try:
2473 try:
2155 return list(obj.keys())
2474 return list(obj.keys())
2156 except Exception:
2475 except Exception:
2157 return []
2476 return []
2477 elif _safe_isinstance(obj, "pandas", "core", "indexing", "_LocIndexer"):
2478 try:
2479 return list(obj.obj.keys())
2480 except Exception:
2481 return []
2158 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
2482 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
2159 _safe_isinstance(obj, 'numpy', 'void'):
2483 _safe_isinstance(obj, 'numpy', 'void'):
2160 return obj.dtype.names or []
2484 return obj.dtype.names or []
@@ -2175,74 +2499,49 b' class IPCompleter(Completer):'
2175 You can use :meth:`dict_key_matcher` instead.
2499 You can use :meth:`dict_key_matcher` instead.
2176 """
2500 """
2177
2501
2178 if self.__dict_key_regexps is not None:
2502 # Short-circuit on closed dictionary (regular expression would
2179 regexps = self.__dict_key_regexps
2503 # not match anyway, but would take quite a while).
2180 else:
2504 if self.text_until_cursor.strip().endswith("]"):
2181 dict_key_re_fmt = r'''(?x)
2505 return []
2182 ( # match dict-referring expression wrt greedy setting
2183 %s
2184 )
2185 \[ # open bracket
2186 \s* # and optional whitespace
2187 # Capture any number of str-like objects (e.g. "a", "b", 'c')
2188 ((?:[uUbB]? # string prefix (r not handled)
2189 (?:
2190 '(?:[^']|(?<!\\)\\')*'
2191 |
2192 "(?:[^"]|(?<!\\)\\")*"
2193 )
2194 \s*,\s*
2195 )*)
2196 ([uUbB]? # string prefix (r not handled)
2197 (?: # unclosed string
2198 '(?:[^']|(?<!\\)\\')*
2199 |
2200 "(?:[^"]|(?<!\\)\\")*
2201 )
2202 )?
2203 $
2204 '''
2205 regexps = self.__dict_key_regexps = {
2206 False: re.compile(dict_key_re_fmt % r'''
2207 # identifiers separated by .
2208 (?!\d)\w+
2209 (?:\.(?!\d)\w+)*
2210 '''),
2211 True: re.compile(dict_key_re_fmt % '''
2212 .+
2213 ''')
2214 }
2215
2506
2216 match = regexps[self.greedy].search(self.text_until_cursor)
2507 match = DICT_MATCHER_REGEX.search(self.text_until_cursor)
2217
2508
2218 if match is None:
2509 if match is None:
2219 return []
2510 return []
2220
2511
2221 expr, prefix0, prefix = match.groups()
2512 expr, prior_tuple_keys, key_prefix = match.groups()
2222 try:
2513
2223 obj = eval(expr, self.namespace)
2514 obj = self._evaluate_expr(expr)
2224 except Exception:
2515
2225 try:
2516 if obj is not_found:
2226 obj = eval(expr, self.global_namespace)
2227 except Exception:
2228 return []
2517 return []
2229
2518
2230 keys = self._get_keys(obj)
2519 keys = self._get_keys(obj)
2231 if not keys:
2520 if not keys:
2232 return keys
2521 return keys
2233
2522
2234 extra_prefix = eval(prefix0) if prefix0 != '' else None
2523 tuple_prefix = guarded_eval(
2524 prior_tuple_keys,
2525 EvaluationContext(
2526 globals=self.global_namespace,
2527 locals=self.namespace,
2528 evaluation=self.evaluation,
2529 in_subscript=True,
2530 ),
2531 )
2235
2532
2236 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims, extra_prefix=extra_prefix)
2533 closing_quote, token_offset, matches = match_dict_keys(
2534 keys, key_prefix, self.splitter.delims, extra_prefix=tuple_prefix
2535 )
2237 if not matches:
2536 if not matches:
2238 return matches
2537 return []
2239
2538
2240 # get the cursor position of
2539 # get the cursor position of
2241 # - the text being completed
2540 # - the text being completed
2242 # - the start of the key text
2541 # - the start of the key text
2243 # - the start of the completion
2542 # - the start of the completion
2244 text_start = len(self.text_until_cursor) - len(text)
2543 text_start = len(self.text_until_cursor) - len(text)
2245 if prefix:
2544 if key_prefix:
2246 key_start = match.start(3)
2545 key_start = match.start(3)
2247 completion_start = key_start + token_offset
2546 completion_start = key_start + token_offset
2248 else:
2547 else:
@@ -2254,26 +2553,61 b' class IPCompleter(Completer):'
2254 else:
2553 else:
2255 leading = text[text_start:completion_start]
2554 leading = text[text_start:completion_start]
2256
2555
2257 # the index of the `[` character
2258 bracket_idx = match.end(1)
2259
2260 # append closing quote and bracket as appropriate
2556 # append closing quote and bracket as appropriate
2261 # this is *not* appropriate if the opening quote or bracket is outside
2557 # this is *not* appropriate if the opening quote or bracket is outside
2262 # the text given to this method
2558 # the text given to this method, e.g. `d["""a\nt
2263 suf = ''
2559 can_close_quote = False
2264 continuation = self.line_buffer[len(self.text_until_cursor):]
2560 can_close_bracket = False
2265 if key_start > text_start and closing_quote:
2561
2266 # quotes were opened inside text, maybe close them
2562 continuation = self.line_buffer[len(self.text_until_cursor) :].strip()
2563
2267 if continuation.startswith(closing_quote):
2564 if continuation.startswith(closing_quote):
2565 # do not close if already closed, e.g. `d['a<tab>'`
2268 continuation = continuation[len(closing_quote):]
2566 continuation = continuation[len(closing_quote) :]
2269 else:
2567 else:
2270 suf += closing_quote
2568 can_close_quote = True
2271 if bracket_idx > text_start:
2569
2272 # brackets were opened inside text, maybe close them
2570 continuation = continuation.strip()
2273 if not continuation.startswith(']'):
2571
2274 suf += ']'
2572 # e.g. `pandas.DataFrame` has different tuple indexer behaviour,
2573 # handling it is out of scope, so let's avoid appending suffixes.
2574 has_known_tuple_handling = isinstance(obj, dict)
2575
2576 can_close_bracket = (
2577 not continuation.startswith("]") and self.auto_close_dict_keys
2578 )
2579 can_close_tuple_item = (
2580 not continuation.startswith(",")
2581 and has_known_tuple_handling
2582 and self.auto_close_dict_keys
2583 )
2584 can_close_quote = can_close_quote and self.auto_close_dict_keys
2275
2585
2276 return [leading + k + suf for k in matches]
2586 # fast path if closing qoute should be appended but not suffix is allowed
2587 if not can_close_quote and not can_close_bracket and closing_quote:
2588 return [leading + k for k in matches]
2589
2590 results = []
2591
2592 end_of_tuple_or_item = _DictKeyState.END_OF_TUPLE | _DictKeyState.END_OF_ITEM
2593
2594 for k, state_flag in matches.items():
2595 result = leading + k
2596 if can_close_quote and closing_quote:
2597 result += closing_quote
2598
2599 if state_flag == end_of_tuple_or_item:
2600 # We do not know which suffix to add,
2601 # e.g. both tuple item and string
2602 # match this item.
2603 pass
2604
2605 if state_flag in end_of_tuple_or_item and can_close_bracket:
2606 result += "]"
2607 if state_flag == _DictKeyState.IN_TUPLE and can_close_tuple_item:
2608 result += ", "
2609 results.append(result)
2610 return results
2277
2611
2278 @context_matcher()
2612 @context_matcher()
2279 def unicode_name_matcher(self, context: CompletionContext):
2613 def unicode_name_matcher(self, context: CompletionContext):
@@ -2516,17 +2850,23 b' class IPCompleter(Completer):'
2516
2850
2517 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2851 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2518
2852
2853 def is_non_jedi_result(
2854 result: MatcherResult, identifier: str
2855 ) -> TypeGuard[SimpleMatcherResult]:
2856 return identifier != jedi_matcher_id
2857
2519 results = self._complete(
2858 results = self._complete(
2520 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column
2859 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column
2521 )
2860 )
2861
2522 non_jedi_results: Dict[str, SimpleMatcherResult] = {
2862 non_jedi_results: Dict[str, SimpleMatcherResult] = {
2523 identifier: result
2863 identifier: result
2524 for identifier, result in results.items()
2864 for identifier, result in results.items()
2525 if identifier != jedi_matcher_id
2865 if is_non_jedi_result(result, identifier)
2526 }
2866 }
2527
2867
2528 jedi_matches = (
2868 jedi_matches = (
2529 cast(results[jedi_matcher_id], _JediMatcherResult)["completions"]
2869 cast(_JediMatcherResult, results[jedi_matcher_id])["completions"]
2530 if jedi_matcher_id in results
2870 if jedi_matcher_id in results
2531 else ()
2871 else ()
2532 )
2872 )
@@ -2581,8 +2921,8 b' class IPCompleter(Completer):'
2581 signature="",
2921 signature="",
2582 )
2922 )
2583
2923
2584 ordered = []
2924 ordered: List[Completion] = []
2585 sortable = []
2925 sortable: List[Completion] = []
2586
2926
2587 for origin, result in non_jedi_results.items():
2927 for origin, result in non_jedi_results.items():
2588 matched_text = result["matched_fragment"]
2928 matched_text = result["matched_fragment"]
@@ -2672,8 +3012,8 b' class IPCompleter(Completer):'
2672 abort_if_offset_changes: bool,
3012 abort_if_offset_changes: bool,
2673 ):
3013 ):
2674
3014
2675 sortable = []
3015 sortable: List[AnyMatcherCompletion] = []
2676 ordered = []
3016 ordered: List[AnyMatcherCompletion] = []
2677 most_recent_fragment = None
3017 most_recent_fragment = None
2678 for identifier, result in results.items():
3018 for identifier, result in results.items():
2679 if identifier in skip_matchers:
3019 if identifier in skip_matchers:
@@ -2772,11 +3112,11 b' class IPCompleter(Completer):'
2772 )
3112 )
2773
3113
2774 # Start with a clean slate of completions
3114 # Start with a clean slate of completions
2775 results = {}
3115 results: Dict[str, MatcherResult] = {}
2776
3116
2777 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
3117 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2778
3118
2779 suppressed_matchers = set()
3119 suppressed_matchers: Set[str] = set()
2780
3120
2781 matchers = {
3121 matchers = {
2782 _get_matcher_id(matcher): matcher
3122 _get_matcher_id(matcher): matcher
@@ -2786,7 +3126,6 b' class IPCompleter(Completer):'
2786 }
3126 }
2787
3127
2788 for matcher_id, matcher in matchers.items():
3128 for matcher_id, matcher in matchers.items():
2789 api_version = _get_matcher_api_version(matcher)
2790 matcher_id = _get_matcher_id(matcher)
3129 matcher_id = _get_matcher_id(matcher)
2791
3130
2792 if matcher_id in self.disable_matchers:
3131 if matcher_id in self.disable_matchers:
@@ -2798,14 +3137,16 b' class IPCompleter(Completer):'
2798 if matcher_id in suppressed_matchers:
3137 if matcher_id in suppressed_matchers:
2799 continue
3138 continue
2800
3139
3140 result: MatcherResult
2801 try:
3141 try:
2802 if api_version == 1:
3142 if _is_matcher_v1(matcher):
2803 result = _convert_matcher_v1_result_to_v2(
3143 result = _convert_matcher_v1_result_to_v2(
2804 matcher(text), type=_UNKNOWN_TYPE
3144 matcher(text), type=_UNKNOWN_TYPE
2805 )
3145 )
2806 elif api_version == 2:
3146 elif _is_matcher_v2(matcher):
2807 result = cast(matcher, MatcherAPIv2)(context)
3147 result = matcher(context)
2808 else:
3148 else:
3149 api_version = _get_matcher_api_version(matcher)
2809 raise ValueError(f"Unsupported API version {api_version}")
3150 raise ValueError(f"Unsupported API version {api_version}")
2810 except:
3151 except:
2811 # Show the ugly traceback if the matcher causes an
3152 # Show the ugly traceback if the matcher causes an
@@ -2817,7 +3158,9 b' class IPCompleter(Completer):'
2817 result["matched_fragment"] = result.get("matched_fragment", context.token)
3158 result["matched_fragment"] = result.get("matched_fragment", context.token)
2818
3159
2819 if not suppressed_matchers:
3160 if not suppressed_matchers:
2820 suppression_recommended = result.get("suppress", False)
3161 suppression_recommended: Union[bool, Set[str]] = result.get(
3162 "suppress", False
3163 )
2821
3164
2822 suppression_config = (
3165 suppression_config = (
2823 self.suppress_competing_matchers.get(matcher_id, None)
3166 self.suppress_competing_matchers.get(matcher_id, None)
@@ -2830,10 +3173,12 b' class IPCompleter(Completer):'
2830 ) and has_any_completions(result)
3173 ) and has_any_completions(result)
2831
3174
2832 if should_suppress:
3175 if should_suppress:
2833 suppression_exceptions = result.get("do_not_suppress", set())
3176 suppression_exceptions: Set[str] = result.get(
2834 try:
3177 "do_not_suppress", set()
3178 )
3179 if isinstance(suppression_recommended, Iterable):
2835 to_suppress = set(suppression_recommended)
3180 to_suppress = set(suppression_recommended)
2836 except TypeError:
3181 else:
2837 to_suppress = set(matchers)
3182 to_suppress = set(matchers)
2838 suppressed_matchers = to_suppress - suppression_exceptions
3183 suppressed_matchers = to_suppress - suppression_exceptions
2839
3184
@@ -2860,9 +3205,9 b' class IPCompleter(Completer):'
2860
3205
2861 @staticmethod
3206 @staticmethod
2862 def _deduplicate(
3207 def _deduplicate(
2863 matches: Sequence[SimpleCompletion],
3208 matches: Sequence[AnyCompletion],
2864 ) -> Iterable[SimpleCompletion]:
3209 ) -> Iterable[AnyCompletion]:
2865 filtered_matches = {}
3210 filtered_matches: Dict[str, AnyCompletion] = {}
2866 for match in matches:
3211 for match in matches:
2867 text = match.text
3212 text = match.text
2868 if (
3213 if (
@@ -2874,7 +3219,7 b' class IPCompleter(Completer):'
2874 return filtered_matches.values()
3219 return filtered_matches.values()
2875
3220
2876 @staticmethod
3221 @staticmethod
2877 def _sort(matches: Sequence[SimpleCompletion]):
3222 def _sort(matches: Sequence[AnyCompletion]):
2878 return sorted(matches, key=lambda x: completions_sorting_key(x.text))
3223 return sorted(matches, key=lambda x: completions_sorting_key(x.text))
2879
3224
2880 @context_matcher()
3225 @context_matcher()
@@ -389,6 +389,9 b' class InteractiveShell(SingletonConfigurable):'
389 displayhook_class = Type(DisplayHook)
389 displayhook_class = Type(DisplayHook)
390 display_pub_class = Type(DisplayPublisher)
390 display_pub_class = Type(DisplayPublisher)
391 compiler_class = Type(CachingCompiler)
391 compiler_class = Type(CachingCompiler)
392 inspector_class = Type(
393 oinspect.Inspector, help="Class to use to instantiate the shell inspector"
394 ).tag(config=True)
392
395
393 sphinxify_docstring = Bool(False, help=
396 sphinxify_docstring = Bool(False, help=
394 """
397 """
@@ -755,10 +758,12 b' class InteractiveShell(SingletonConfigurable):'
755 @observe('colors')
758 @observe('colors')
756 def init_inspector(self, changes=None):
759 def init_inspector(self, changes=None):
757 # Object inspector
760 # Object inspector
758 self.inspector = oinspect.Inspector(oinspect.InspectColors,
761 self.inspector = self.inspector_class(
762 oinspect.InspectColors,
759 PyColorize.ANSICodeColors,
763 PyColorize.ANSICodeColors,
760 self.colors,
764 self.colors,
761 self.object_info_string_level)
765 self.object_info_string_level,
766 )
762
767
763 def init_io(self):
768 def init_io(self):
764 # implemented in subclasses, TerminalInteractiveShell does call
769 # implemented in subclasses, TerminalInteractiveShell does call
@@ -3154,8 +3159,12 b' class InteractiveShell(SingletonConfigurable):'
3154 else:
3159 else:
3155 cell = raw_cell
3160 cell = raw_cell
3156
3161
3162 # Do NOT store paste/cpaste magic history
3163 if "get_ipython().run_line_magic(" in cell and "paste" in cell:
3164 store_history = False
3165
3157 # Store raw and processed history
3166 # Store raw and processed history
3158 if store_history and raw_cell.strip(" %") != "paste":
3167 if store_history:
3159 self.history_manager.store_inputs(self.execution_count, cell, raw_cell)
3168 self.history_manager.store_inputs(self.execution_count, cell, raw_cell)
3160 if not silent:
3169 if not silent:
3161 self.logger.log(cell, raw_cell)
3170 self.logger.log(cell, raw_cell)
@@ -68,94 +68,22 b' class ConfigMagics(Magics):'
68 To view what is configurable on a given class, just pass the class
68 To view what is configurable on a given class, just pass the class
69 name::
69 name::
70
70
71 In [2]: %config IPCompleter
71 In [2]: %config LoggingMagics
72 IPCompleter(Completer) options
72 LoggingMagics(Magics) options
73 ----------------------------
73 ---------------------------
74 IPCompleter.backslash_combining_completions=<Bool>
74 LoggingMagics.quiet=<Bool>
75 Enable unicode completions, e.g. \\alpha<tab> . Includes completion of latex
75 Suppress output of log state when logging is enabled
76 commands, unicode names, and expanding unicode characters back to latex
77 commands.
78 Current: True
79 IPCompleter.debug=<Bool>
80 Enable debug for the Completer. Mostly print extra information for
81 experimental jedi integration.
82 Current: False
76 Current: False
83 IPCompleter.disable_matchers=<list-item-1>...
84 List of matchers to disable.
85 The list should contain matcher identifiers (see
86 :any:`completion_matcher`).
87 Current: []
88 IPCompleter.greedy=<Bool>
89 Activate greedy completion
90 PENDING DEPRECATION. this is now mostly taken care of with Jedi.
91 This will enable completion on elements of lists, results of function calls, etc.,
92 but can be unsafe because the code is actually evaluated on TAB.
93 Current: False
94 IPCompleter.jedi_compute_type_timeout=<Int>
95 Experimental: restrict time (in milliseconds) during which Jedi can compute types.
96 Set to 0 to stop computing types. Non-zero value lower than 100ms may hurt
97 performance by preventing jedi to build its cache.
98 Current: 400
99 IPCompleter.limit_to__all__=<Bool>
100 DEPRECATED as of version 5.0.
101 Instruct the completer to use __all__ for the completion
102 Specifically, when completing on ``object.<tab>``.
103 When True: only those names in obj.__all__ will be included.
104 When False [default]: the __all__ attribute is ignored
105 Current: False
106 IPCompleter.merge_completions=<Bool>
107 Whether to merge completion results into a single list
108 If False, only the completion results from the first non-empty
109 completer will be returned.
110 As of version 8.6.0, setting the value to ``False`` is an alias for:
111 ``IPCompleter.suppress_competing_matchers = True.``.
112 Current: True
113 IPCompleter.omit__names=<Enum>
114 Instruct the completer to omit private method names
115 Specifically, when completing on ``object.<tab>``.
116 When 2 [default]: all names that start with '_' will be excluded.
117 When 1: all 'magic' names (``__foo__``) will be excluded.
118 When 0: nothing will be excluded.
119 Choices: any of [0, 1, 2]
120 Current: 2
121 IPCompleter.profile_completions=<Bool>
122 If True, emit profiling data for completion subsystem using cProfile.
123 Current: False
124 IPCompleter.profiler_output_dir=<Unicode>
125 Template for path at which to output profile data for completions.
126 Current: '.completion_profiles'
127 IPCompleter.suppress_competing_matchers=<Union>
128 Whether to suppress completions from other *Matchers*.
129 When set to ``None`` (default) the matchers will attempt to auto-detect
130 whether suppression of other matchers is desirable. For example, at the
131 beginning of a line followed by `%` we expect a magic completion to be the
132 only applicable option, and after ``my_dict['`` we usually expect a
133 completion with an existing dictionary key.
134 If you want to disable this heuristic and see completions from all matchers,
135 set ``IPCompleter.suppress_competing_matchers = False``. To disable the
136 heuristic for specific matchers provide a dictionary mapping:
137 ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher':
138 False}``.
139 Set ``IPCompleter.suppress_competing_matchers = True`` to limit completions
140 to the set of matchers with the highest priority; this is equivalent to
141 ``IPCompleter.merge_completions`` and can be beneficial for performance, but
142 will sometimes omit relevant candidates from matchers further down the
143 priority list.
144 Current: None
145 IPCompleter.use_jedi=<Bool>
146 Experimental: Use Jedi to generate autocompletions. Default to True if jedi
147 is installed.
148 Current: True
149
77
150 but the real use is in setting values::
78 but the real use is in setting values::
151
79
152 In [3]: %config IPCompleter.greedy = True
80 In [3]: %config LoggingMagics.quiet = True
153
81
154 and these values are read from the user_ns if they are variables::
82 and these values are read from the user_ns if they are variables::
155
83
156 In [4]: feeling_greedy=False
84 In [4]: feeling_quiet=False
157
85
158 In [5]: %config IPCompleter.greedy = feeling_greedy
86 In [5]: %config LoggingMagics.quiet = feeling_quiet
159
87
160 """
88 """
161 from traitlets.config.loader import Config
89 from traitlets.config.loader import Config
@@ -210,7 +210,7 b' class ScriptMagics(Magics):'
210
210
211 async def _handle_stream(stream, stream_arg, file_object):
211 async def _handle_stream(stream, stream_arg, file_object):
212 while True:
212 while True:
213 line = (await stream.readline()).decode("utf8")
213 line = (await stream.readline()).decode("utf8", errors="replace")
214 if not line:
214 if not line:
215 break
215 break
216 if stream_arg:
216 if stream_arg:
@@ -16,6 +16,7 b" __all__ = ['Inspector','InspectColors']"
16 import ast
16 import ast
17 import inspect
17 import inspect
18 from inspect import signature
18 from inspect import signature
19 import html
19 import linecache
20 import linecache
20 import warnings
21 import warnings
21 import os
22 import os
@@ -530,8 +531,8 b' class Inspector(Colorable):'
530
531
531 """
532 """
532 defaults = {
533 defaults = {
533 'text/plain': text,
534 "text/plain": text,
534 'text/html': '<pre>' + text + '</pre>'
535 "text/html": f"<pre>{html.escape(text)}</pre>",
535 }
536 }
536
537
537 if formatter is None:
538 if formatter is None:
@@ -542,66 +543,66 b' class Inspector(Colorable):'
542 if not isinstance(formatted, dict):
543 if not isinstance(formatted, dict):
543 # Handle the deprecated behavior of a formatter returning
544 # Handle the deprecated behavior of a formatter returning
544 # a string instead of a mime bundle.
545 # a string instead of a mime bundle.
545 return {
546 return {"text/plain": formatted, "text/html": f"<pre>{formatted}</pre>"}
546 'text/plain': formatted,
547 'text/html': '<pre>' + formatted + '</pre>'
548 }
549
547
550 else:
548 else:
551 return dict(defaults, **formatted)
549 return dict(defaults, **formatted)
552
550
553
551
554 def format_mime(self, bundle):
552 def format_mime(self, bundle):
553 """Format a mimebundle being created by _make_info_unformatted into a real mimebundle"""
554 # Format text/plain mimetype
555 if isinstance(bundle["text/plain"], (list, tuple)):
556 # bundle['text/plain'] is a list of (head, formatted body) pairs
557 lines = []
558 _len = max(len(h) for h, _ in bundle["text/plain"])
555
559
556 text_plain = bundle['text/plain']
560 for head, body in bundle["text/plain"]:
557
561 body = body.strip("\n")
558 text = ''
562 delim = "\n" if "\n" in body else " "
559 heads, bodies = list(zip(*text_plain))
563 lines.append(
560 _len = max(len(h) for h in heads)
564 f"{self.__head(head+':')}{(_len - len(head))*' '}{delim}{body}"
565 )
561
566
562 for head, body in zip(heads, bodies):
567 bundle["text/plain"] = "\n".join(lines)
563 body = body.strip('\n')
564 delim = '\n' if '\n' in body else ' '
565 text += self.__head(head+':') + (_len - len(head))*' ' +delim + body +'\n'
566
568
567 bundle['text/plain'] = text
569 # Format the text/html mimetype
570 if isinstance(bundle["text/html"], (list, tuple)):
571 # bundle['text/html'] is a list of (head, formatted body) pairs
572 bundle["text/html"] = "\n".join(
573 (f"<h1>{head}</h1>\n{body}" for (head, body) in bundle["text/html"])
574 )
568 return bundle
575 return bundle
569
576
570 def _get_info(
577 def _append_info_field(
571 self, obj, oname="", formatter=None, info=None, detail_level=0, omit_sections=()
578 self, bundle, title: str, key: str, info, omit_sections, formatter
572 ):
579 ):
573 """Retrieve an info dict and format it.
580 """Append an info value to the unformatted mimebundle being constructed by _make_info_unformatted"""
574
575 Parameters
576 ----------
577 obj : any
578 Object to inspect and return info from
579 oname : str (default: ''):
580 Name of the variable pointing to `obj`.
581 formatter : callable
582 info
583 already computed information
584 detail_level : integer
585 Granularity of detail level, if set to 1, give more information.
586 omit_sections : container[str]
587 Titles or keys to omit from output (can be set, tuple, etc., anything supporting `in`)
588 """
589
590 info = self.info(obj, oname=oname, info=info, detail_level=detail_level)
591
592 _mime = {
593 'text/plain': [],
594 'text/html': '',
595 }
596
597 def append_field(bundle, title:str, key:str, formatter=None):
598 if title in omit_sections or key in omit_sections:
581 if title in omit_sections or key in omit_sections:
599 return
582 return
600 field = info[key]
583 field = info[key]
601 if field is not None:
584 if field is not None:
602 formatted_field = self._mime_format(field, formatter)
585 formatted_field = self._mime_format(field, formatter)
603 bundle['text/plain'].append((title, formatted_field['text/plain']))
586 bundle["text/plain"].append((title, formatted_field["text/plain"]))
604 bundle['text/html'] += '<h1>' + title + '</h1>\n' + formatted_field['text/html'] + '\n'
587 bundle["text/html"].append((title, formatted_field["text/html"]))
588
589 def _make_info_unformatted(self, obj, info, formatter, detail_level, omit_sections):
590 """Assemble the mimebundle as unformatted lists of information"""
591 bundle = {
592 "text/plain": [],
593 "text/html": [],
594 }
595
596 # A convenience function to simplify calls below
597 def append_field(bundle, title: str, key: str, formatter=None):
598 self._append_info_field(
599 bundle,
600 title=title,
601 key=key,
602 info=info,
603 omit_sections=omit_sections,
604 formatter=formatter,
605 )
605
606
606 def code_formatter(text):
607 def code_formatter(text):
607 return {
608 return {
@@ -609,57 +610,82 b' class Inspector(Colorable):'
609 'text/html': pylight(text)
610 'text/html': pylight(text)
610 }
611 }
611
612
612 if info['isalias']:
613 if info["isalias"]:
613 append_field(_mime, 'Repr', 'string_form')
614 append_field(bundle, "Repr", "string_form")
614
615
615 elif info['ismagic']:
616 elif info['ismagic']:
616 if detail_level > 0:
617 if detail_level > 0:
617 append_field(_mime, 'Source', 'source', code_formatter)
618 append_field(bundle, "Source", "source", code_formatter)
618 else:
619 else:
619 append_field(_mime, 'Docstring', 'docstring', formatter)
620 append_field(bundle, "Docstring", "docstring", formatter)
620 append_field(_mime, 'File', 'file')
621 append_field(bundle, "File", "file")
621
622
622 elif info['isclass'] or is_simple_callable(obj):
623 elif info['isclass'] or is_simple_callable(obj):
623 # Functions, methods, classes
624 # Functions, methods, classes
624 append_field(_mime, 'Signature', 'definition', code_formatter)
625 append_field(bundle, "Signature", "definition", code_formatter)
625 append_field(_mime, 'Init signature', 'init_definition', code_formatter)
626 append_field(bundle, "Init signature", "init_definition", code_formatter)
626 append_field(_mime, 'Docstring', 'docstring', formatter)
627 append_field(bundle, "Docstring", "docstring", formatter)
627 if detail_level > 0 and info['source']:
628 if detail_level > 0 and info["source"]:
628 append_field(_mime, 'Source', 'source', code_formatter)
629 append_field(bundle, "Source", "source", code_formatter)
629 else:
630 else:
630 append_field(_mime, 'Init docstring', 'init_docstring', formatter)
631 append_field(bundle, "Init docstring", "init_docstring", formatter)
631
632
632 append_field(_mime, 'File', 'file')
633 append_field(bundle, "File", "file")
633 append_field(_mime, 'Type', 'type_name')
634 append_field(bundle, "Type", "type_name")
634 append_field(_mime, 'Subclasses', 'subclasses')
635 append_field(bundle, "Subclasses", "subclasses")
635
636
636 else:
637 else:
637 # General Python objects
638 # General Python objects
638 append_field(_mime, 'Signature', 'definition', code_formatter)
639 append_field(bundle, "Signature", "definition", code_formatter)
639 append_field(_mime, 'Call signature', 'call_def', code_formatter)
640 append_field(bundle, "Call signature", "call_def", code_formatter)
640 append_field(_mime, 'Type', 'type_name')
641 append_field(bundle, "Type", "type_name")
641 append_field(_mime, 'String form', 'string_form')
642 append_field(bundle, "String form", "string_form")
642
643
643 # Namespace
644 # Namespace
644 if info['namespace'] != 'Interactive':
645 if info["namespace"] != "Interactive":
645 append_field(_mime, 'Namespace', 'namespace')
646 append_field(bundle, "Namespace", "namespace")
646
647
647 append_field(_mime, 'Length', 'length')
648 append_field(bundle, "Length", "length")
648 append_field(_mime, 'File', 'file')
649 append_field(bundle, "File", "file")
649
650
650 # Source or docstring, depending on detail level and whether
651 # Source or docstring, depending on detail level and whether
651 # source found.
652 # source found.
652 if detail_level > 0 and info['source']:
653 if detail_level > 0 and info["source"]:
653 append_field(_mime, 'Source', 'source', code_formatter)
654 append_field(bundle, "Source", "source", code_formatter)
654 else:
655 else:
655 append_field(_mime, 'Docstring', 'docstring', formatter)
656 append_field(bundle, "Docstring", "docstring", formatter)
657
658 append_field(bundle, "Class docstring", "class_docstring", formatter)
659 append_field(bundle, "Init docstring", "init_docstring", formatter)
660 append_field(bundle, "Call docstring", "call_docstring", formatter)
661 return bundle
656
662
657 append_field(_mime, 'Class docstring', 'class_docstring', formatter)
658 append_field(_mime, 'Init docstring', 'init_docstring', formatter)
659 append_field(_mime, 'Call docstring', 'call_docstring', formatter)
660
663
664 def _get_info(
665 self, obj, oname="", formatter=None, info=None, detail_level=0, omit_sections=()
666 ):
667 """Retrieve an info dict and format it.
668
669 Parameters
670 ----------
671 obj : any
672 Object to inspect and return info from
673 oname : str (default: ''):
674 Name of the variable pointing to `obj`.
675 formatter : callable
676 info
677 already computed information
678 detail_level : integer
679 Granularity of detail level, if set to 1, give more information.
680 omit_sections : container[str]
681 Titles or keys to omit from output (can be set, tuple, etc., anything supporting `in`)
682 """
661
683
662 return self.format_mime(_mime)
684 info = self.info(obj, oname=oname, info=info, detail_level=detail_level)
685 bundle = self._make_info_unformatted(
686 obj, info, formatter, detail_level=detail_level, omit_sections=omit_sections
687 )
688 return self.format_mime(bundle)
663
689
664 def pinfo(
690 def pinfo(
665 self,
691 self,
@@ -16,7 +16,7 b''
16 # release. 'dev' as a _version_extra string means this is a development
16 # release. 'dev' as a _version_extra string means this is a development
17 # version
17 # version
18 _version_major = 8
18 _version_major = 8
19 _version_minor = 8
19 _version_minor = 9
20 _version_patch = 0
20 _version_patch = 0
21 _version_extra = ".dev"
21 _version_extra = ".dev"
22 # _version_extra = "rc1"
22 # _version_extra = "rc1"
@@ -24,6 +24,7 b' from IPython.core.completer import ('
24 provisionalcompleter,
24 provisionalcompleter,
25 match_dict_keys,
25 match_dict_keys,
26 _deduplicate_completions,
26 _deduplicate_completions,
27 _match_number_in_dict_key_prefix,
27 completion_matcher,
28 completion_matcher,
28 SimpleCompletion,
29 SimpleCompletion,
29 CompletionContext,
30 CompletionContext,
@@ -98,7 +99,7 b' def test_unicode_range():'
98 assert len_exp == len_test, message
99 assert len_exp == len_test, message
99
100
100 # fail if new unicode symbols have been added.
101 # fail if new unicode symbols have been added.
101 assert len_exp <= 138552, message
102 assert len_exp <= 143041, message
102
103
103
104
104 @contextmanager
105 @contextmanager
@@ -113,6 +114,17 b' def greedy_completion():'
113
114
114
115
115 @contextmanager
116 @contextmanager
117 def evaluation_policy(evaluation: str):
118 ip = get_ipython()
119 evaluation_original = ip.Completer.evaluation
120 try:
121 ip.Completer.evaluation = evaluation
122 yield
123 finally:
124 ip.Completer.evaluation = evaluation_original
125
126
127 @contextmanager
116 def custom_matchers(matchers):
128 def custom_matchers(matchers):
117 ip = get_ipython()
129 ip = get_ipython()
118 try:
130 try:
@@ -170,7 +182,6 b' def check_line_split(splitter, test_specs):'
170 out = splitter.split_line(line, cursor_pos)
182 out = splitter.split_line(line, cursor_pos)
171 assert out == split
183 assert out == split
172
184
173
174 def test_line_split():
185 def test_line_split():
175 """Basic line splitter test with default specs."""
186 """Basic line splitter test with default specs."""
176 sp = completer.CompletionSplitter()
187 sp = completer.CompletionSplitter()
@@ -841,18 +852,45 b' class TestCompleter(unittest.TestCase):'
841 """
852 """
842 delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
853 delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
843
854
844 keys = ["foo", b"far"]
855 def match(*args, **kwargs):
845 assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2, ["far"])
856 quote, offset, matches = match_dict_keys(*args, delims=delims, **kwargs)
846 assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2, ["far"])
857 return quote, offset, list(matches)
847 assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2, ["far"])
848 assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2, ["far"])
849
850 assert match_dict_keys(keys, "'", delims=delims) == ("'", 1, ["foo"])
851 assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1, ["foo"])
852 assert match_dict_keys(keys, '"', delims=delims) == ('"', 1, ["foo"])
853 assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1, ["foo"])
854
858
855 match_dict_keys
859 keys = ["foo", b"far"]
860 assert match(keys, "b'") == ("'", 2, ["far"])
861 assert match(keys, "b'f") == ("'", 2, ["far"])
862 assert match(keys, 'b"') == ('"', 2, ["far"])
863 assert match(keys, 'b"f') == ('"', 2, ["far"])
864
865 assert match(keys, "'") == ("'", 1, ["foo"])
866 assert match(keys, "'f") == ("'", 1, ["foo"])
867 assert match(keys, '"') == ('"', 1, ["foo"])
868 assert match(keys, '"f') == ('"', 1, ["foo"])
869
870 # Completion on first item of tuple
871 keys = [("foo", 1111), ("foo", 2222), (3333, "bar"), (3333, "test")]
872 assert match(keys, "'f") == ("'", 1, ["foo"])
873 assert match(keys, "33") == ("", 0, ["3333"])
874
875 # Completion on numbers
876 keys = [
877 0xDEADBEEF,
878 1111,
879 1234,
880 "1999",
881 0b10101,
882 22,
883 ] # 0xDEADBEEF = 3735928559; 0b10101 = 21
884 assert match(keys, "0xdead") == ("", 0, ["0xdeadbeef"])
885 assert match(keys, "1") == ("", 0, ["1111", "1234"])
886 assert match(keys, "2") == ("", 0, ["21", "22"])
887 assert match(keys, "0b101") == ("", 0, ["0b10101", "0b10110"])
888
889 # Should yield on variables
890 assert match(keys, "a_variable") == ("", 0, [])
891
892 # Should pass over invalid literals
893 assert match(keys, "'' ''") == ("", 0, [])
856
894
857 def test_match_dict_keys_tuple(self):
895 def test_match_dict_keys_tuple(self):
858 """
896 """
@@ -863,25 +901,91 b' class TestCompleter(unittest.TestCase):'
863
901
864 keys = [("foo", "bar"), ("foo", "oof"), ("foo", b"bar"), ('other', 'test')]
902 keys = [("foo", "bar"), ("foo", "oof"), ("foo", b"bar"), ('other', 'test')]
865
903
904 def match(*args, extra=None, **kwargs):
905 quote, offset, matches = match_dict_keys(
906 *args, delims=delims, extra_prefix=extra, **kwargs
907 )
908 return quote, offset, list(matches)
909
866 # Completion on first key == "foo"
910 # Completion on first key == "foo"
867 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["bar", "oof"])
911 assert match(keys, "'", extra=("foo",)) == ("'", 1, ["bar", "oof"])
868 assert match_dict_keys(keys, "\"", delims=delims, extra_prefix=("foo",)) == ("\"", 1, ["bar", "oof"])
912 assert match(keys, '"', extra=("foo",)) == ('"', 1, ["bar", "oof"])
869 assert match_dict_keys(keys, "'o", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["oof"])
913 assert match(keys, "'o", extra=("foo",)) == ("'", 1, ["oof"])
870 assert match_dict_keys(keys, "\"o", delims=delims, extra_prefix=("foo",)) == ("\"", 1, ["oof"])
914 assert match(keys, '"o', extra=("foo",)) == ('"', 1, ["oof"])
871 assert match_dict_keys(keys, "b'", delims=delims, extra_prefix=("foo",)) == ("'", 2, ["bar"])
915 assert match(keys, "b'", extra=("foo",)) == ("'", 2, ["bar"])
872 assert match_dict_keys(keys, "b\"", delims=delims, extra_prefix=("foo",)) == ("\"", 2, ["bar"])
916 assert match(keys, 'b"', extra=("foo",)) == ('"', 2, ["bar"])
873 assert match_dict_keys(keys, "b'b", delims=delims, extra_prefix=("foo",)) == ("'", 2, ["bar"])
917 assert match(keys, "b'b", extra=("foo",)) == ("'", 2, ["bar"])
874 assert match_dict_keys(keys, "b\"b", delims=delims, extra_prefix=("foo",)) == ("\"", 2, ["bar"])
918 assert match(keys, 'b"b', extra=("foo",)) == ('"', 2, ["bar"])
875
919
876 # No Completion
920 # No Completion
877 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("no_foo",)) == ("'", 1, [])
921 assert match(keys, "'", extra=("no_foo",)) == ("'", 1, [])
878 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("fo",)) == ("'", 1, [])
922 assert match(keys, "'", extra=("fo",)) == ("'", 1, [])
923
924 keys = [("foo1", "foo2", "foo3", "foo4"), ("foo1", "foo2", "bar", "foo4")]
925 assert match(keys, "'foo", extra=("foo1",)) == ("'", 1, ["foo2"])
926 assert match(keys, "'foo", extra=("foo1", "foo2")) == ("'", 1, ["foo3"])
927 assert match(keys, "'foo", extra=("foo1", "foo2", "foo3")) == ("'", 1, ["foo4"])
928 assert match(keys, "'foo", extra=("foo1", "foo2", "foo3", "foo4")) == (
929 "'",
930 1,
931 [],
932 )
933
934 keys = [("foo", 1111), ("foo", "2222"), (3333, "bar"), (3333, 4444)]
935 assert match(keys, "'", extra=("foo",)) == ("'", 1, ["2222"])
936 assert match(keys, "", extra=("foo",)) == ("", 0, ["1111", "'2222'"])
937 assert match(keys, "'", extra=(3333,)) == ("'", 1, ["bar"])
938 assert match(keys, "", extra=(3333,)) == ("", 0, ["'bar'", "4444"])
939 assert match(keys, "'", extra=("3333",)) == ("'", 1, [])
940 assert match(keys, "33") == ("", 0, ["3333"])
941
942 def test_dict_key_completion_closures(self):
943 ip = get_ipython()
944 complete = ip.Completer.complete
945 ip.Completer.auto_close_dict_keys = True
946
947 ip.user_ns["d"] = {
948 # tuple only
949 ("aa", 11): None,
950 # tuple and non-tuple
951 ("bb", 22): None,
952 "bb": None,
953 # non-tuple only
954 "cc": None,
955 # numeric tuple only
956 (77, "x"): None,
957 # numeric tuple and non-tuple
958 (88, "y"): None,
959 88: None,
960 # numeric non-tuple only
961 99: None,
962 }
879
963
880 keys = [('foo1', 'foo2', 'foo3', 'foo4'), ('foo1', 'foo2', 'bar', 'foo4')]
964 _, matches = complete(line_buffer="d[")
881 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1',)) == ("'", 1, ["foo2", "foo2"])
965 # should append `, ` if matches a tuple only
882 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2')) == ("'", 1, ["foo3"])
966 self.assertIn("'aa', ", matches)
883 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3')) == ("'", 1, ["foo4"])
967 # should not append anything if matches a tuple and an item
884 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3', 'foo4')) == ("'", 1, [])
968 self.assertIn("'bb'", matches)
969 # should append `]` if matches and item only
970 self.assertIn("'cc']", matches)
971
972 # should append `, ` if matches a tuple only
973 self.assertIn("77, ", matches)
974 # should not append anything if matches a tuple and an item
975 self.assertIn("88", matches)
976 # should append `]` if matches and item only
977 self.assertIn("99]", matches)
978
979 _, matches = complete(line_buffer="d['aa', ")
980 # should restrict matches to those matching tuple prefix
981 self.assertIn("11]", matches)
982 self.assertNotIn("'bb'", matches)
983 self.assertNotIn("'bb', ", matches)
984 self.assertNotIn("'bb']", matches)
985 self.assertNotIn("'cc'", matches)
986 self.assertNotIn("'cc', ", matches)
987 self.assertNotIn("'cc']", matches)
988 ip.Completer.auto_close_dict_keys = False
885
989
886 def test_dict_key_completion_string(self):
990 def test_dict_key_completion_string(self):
887 """Test dictionary key completion for string keys"""
991 """Test dictionary key completion for string keys"""
@@ -1038,6 +1142,35 b' class TestCompleter(unittest.TestCase):'
1038 self.assertNotIn("foo", matches)
1142 self.assertNotIn("foo", matches)
1039 self.assertNotIn("bar", matches)
1143 self.assertNotIn("bar", matches)
1040
1144
1145 def test_dict_key_completion_numbers(self):
1146 ip = get_ipython()
1147 complete = ip.Completer.complete
1148
1149 ip.user_ns["d"] = {
1150 0xDEADBEEF: None, # 3735928559
1151 1111: None,
1152 1234: None,
1153 "1999": None,
1154 0b10101: None, # 21
1155 22: None,
1156 }
1157 _, matches = complete(line_buffer="d[1")
1158 self.assertIn("1111", matches)
1159 self.assertIn("1234", matches)
1160 self.assertNotIn("1999", matches)
1161 self.assertNotIn("'1999'", matches)
1162
1163 _, matches = complete(line_buffer="d[0xdead")
1164 self.assertIn("0xdeadbeef", matches)
1165
1166 _, matches = complete(line_buffer="d[2")
1167 self.assertIn("21", matches)
1168 self.assertIn("22", matches)
1169
1170 _, matches = complete(line_buffer="d[0b101")
1171 self.assertIn("0b10101", matches)
1172 self.assertIn("0b10110", matches)
1173
1041 def test_dict_key_completion_contexts(self):
1174 def test_dict_key_completion_contexts(self):
1042 """Test expression contexts in which dict key completion occurs"""
1175 """Test expression contexts in which dict key completion occurs"""
1043 ip = get_ipython()
1176 ip = get_ipython()
@@ -1050,6 +1183,7 b' class TestCompleter(unittest.TestCase):'
1050
1183
1051 ip.user_ns["C"] = C
1184 ip.user_ns["C"] = C
1052 ip.user_ns["get"] = lambda: d
1185 ip.user_ns["get"] = lambda: d
1186 ip.user_ns["nested"] = {"x": d}
1053
1187
1054 def assert_no_completion(**kwargs):
1188 def assert_no_completion(**kwargs):
1055 _, matches = complete(**kwargs)
1189 _, matches = complete(**kwargs)
@@ -1075,6 +1209,13 b' class TestCompleter(unittest.TestCase):'
1075 assert_completion(line_buffer="(d[")
1209 assert_completion(line_buffer="(d[")
1076 assert_completion(line_buffer="C.data[")
1210 assert_completion(line_buffer="C.data[")
1077
1211
1212 # nested dict completion
1213 assert_completion(line_buffer="nested['x'][")
1214
1215 with evaluation_policy("minimal"):
1216 with pytest.raises(AssertionError):
1217 assert_completion(line_buffer="nested['x'][")
1218
1078 # greedy flag
1219 # greedy flag
1079 def assert_completion(**kwargs):
1220 def assert_completion(**kwargs):
1080 _, matches = complete(**kwargs)
1221 _, matches = complete(**kwargs)
@@ -1162,12 +1303,22 b' class TestCompleter(unittest.TestCase):'
1162 _, matches = complete(line_buffer="d['")
1303 _, matches = complete(line_buffer="d['")
1163 self.assertIn("my_head", matches)
1304 self.assertIn("my_head", matches)
1164 self.assertIn("my_data", matches)
1305 self.assertIn("my_data", matches)
1165 # complete on a nested level
1306
1166 with greedy_completion():
1307 def completes_on_nested():
1167 ip.user_ns["d"] = numpy.zeros(2, dtype=dt)
1308 ip.user_ns["d"] = numpy.zeros(2, dtype=dt)
1168 _, matches = complete(line_buffer="d[1]['my_head']['")
1309 _, matches = complete(line_buffer="d[1]['my_head']['")
1169 self.assertTrue(any(["my_dt" in m for m in matches]))
1310 self.assertTrue(any(["my_dt" in m for m in matches]))
1170 self.assertTrue(any(["my_df" in m for m in matches]))
1311 self.assertTrue(any(["my_df" in m for m in matches]))
1312 # complete on a nested level
1313 with greedy_completion():
1314 completes_on_nested()
1315
1316 with evaluation_policy("limited"):
1317 completes_on_nested()
1318
1319 with evaluation_policy("minimal"):
1320 with pytest.raises(AssertionError):
1321 completes_on_nested()
1171
1322
1172 @dec.skip_without("pandas")
1323 @dec.skip_without("pandas")
1173 def test_dataframe_key_completion(self):
1324 def test_dataframe_key_completion(self):
@@ -1180,6 +1331,17 b' class TestCompleter(unittest.TestCase):'
1180 _, matches = complete(line_buffer="d['")
1331 _, matches = complete(line_buffer="d['")
1181 self.assertIn("hello", matches)
1332 self.assertIn("hello", matches)
1182 self.assertIn("world", matches)
1333 self.assertIn("world", matches)
1334 _, matches = complete(line_buffer="d.loc[:, '")
1335 self.assertIn("hello", matches)
1336 self.assertIn("world", matches)
1337 _, matches = complete(line_buffer="d.loc[1:, '")
1338 self.assertIn("hello", matches)
1339 _, matches = complete(line_buffer="d.loc[1:1, '")
1340 self.assertIn("hello", matches)
1341 _, matches = complete(line_buffer="d.loc[1:1:-1, '")
1342 self.assertIn("hello", matches)
1343 _, matches = complete(line_buffer="d.loc[::, '")
1344 self.assertIn("hello", matches)
1183
1345
1184 def test_dict_key_completion_invalids(self):
1346 def test_dict_key_completion_invalids(self):
1185 """Smoke test cases dict key completion can't handle"""
1347 """Smoke test cases dict key completion can't handle"""
@@ -1503,3 +1665,38 b' class TestCompleter(unittest.TestCase):'
1503 _(["completion_b"])
1665 _(["completion_b"])
1504 a_matcher.matcher_priority = 3
1666 a_matcher.matcher_priority = 3
1505 _(["completion_a"])
1667 _(["completion_a"])
1668
1669
1670 @pytest.mark.parametrize(
1671 "input, expected",
1672 [
1673 ["1.234", "1.234"],
1674 # should match signed numbers
1675 ["+1", "+1"],
1676 ["-1", "-1"],
1677 ["-1.0", "-1.0"],
1678 ["-1.", "-1."],
1679 ["+1.", "+1."],
1680 [".1", ".1"],
1681 # should not match non-numbers
1682 ["1..", None],
1683 ["..", None],
1684 [".1.", None],
1685 # should match after comma
1686 [",1", "1"],
1687 [", 1", "1"],
1688 [", .1", ".1"],
1689 [", +.1", "+.1"],
1690 # should not match after trailing spaces
1691 [".1 ", None],
1692 # some complex cases
1693 ["0b_0011_1111_0100_1110", "0b_0011_1111_0100_1110"],
1694 ["0xdeadbeef", "0xdeadbeef"],
1695 ["0b_1110_0101", "0b_1110_0101"],
1696 # should not match if in an operation
1697 ["1 + 1", None],
1698 [", 1 + 1", None],
1699 ],
1700 )
1701 def test_match_numeric_literal_for_dict_key(input, expected):
1702 assert _match_number_in_dict_key_prefix(input) == expected
@@ -367,6 +367,7 b' class TestAutoreload(Fixture):'
367 self.shell.run_code("assert func2() == 'changed'")
367 self.shell.run_code("assert func2() == 'changed'")
368 self.shell.run_code("t = Test(); assert t.new_func() == 'changed'")
368 self.shell.run_code("t = Test(); assert t.new_func() == 'changed'")
369 self.shell.run_code("assert number == 1")
369 self.shell.run_code("assert number == 1")
370 if sys.version_info < (3, 12):
370 self.shell.run_code("assert TestEnum.B.value == 'added'")
371 self.shell.run_code("assert TestEnum.B.value == 'added'")
371
372
372 # ----------- TEST IMPORT FROM MODULE --------------------------
373 # ----------- TEST IMPORT FROM MODULE --------------------------
@@ -4,11 +4,14 b''
4 # Distributed under the terms of the Modified BSD License.
4 # Distributed under the terms of the Modified BSD License.
5
5
6 from unittest import TestCase
6 from unittest import TestCase
7 from pygments import __version__ as pygments_version
7 from pygments.token import Token
8 from pygments.token import Token
8 from pygments.lexers import BashLexer
9 from pygments.lexers import BashLexer
9
10
10 from .. import lexers
11 from .. import lexers
11
12
13 pyg214 = tuple(int(x) for x in pygments_version.split(".")[:2]) >= (2, 14)
14
12
15
13 class TestLexers(TestCase):
16 class TestLexers(TestCase):
14 """Collection of lexers tests"""
17 """Collection of lexers tests"""
@@ -18,25 +21,26 b' class TestLexers(TestCase):'
18
21
19 def testIPythonLexer(self):
22 def testIPythonLexer(self):
20 fragment = '!echo $HOME\n'
23 fragment = '!echo $HOME\n'
21 tokens = [
24 bash_tokens = [
22 (Token.Operator, '!'),
25 (Token.Operator, '!'),
23 ]
26 ]
24 tokens.extend(self.bash_lexer.get_tokens(fragment[1:]))
27 bash_tokens.extend(self.bash_lexer.get_tokens(fragment[1:]))
25 self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
28 ipylex_token = list(self.lexer.get_tokens(fragment))
29 assert bash_tokens[:-1] == ipylex_token[:-1]
26
30
27 fragment_2 = '!' + fragment
31 fragment_2 = "!" + fragment
28 tokens_2 = [
32 tokens_2 = [
29 (Token.Operator, '!!'),
33 (Token.Operator, '!!'),
30 ] + tokens[1:]
34 ] + bash_tokens[1:]
31 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
35 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
32
36
33 fragment_2 = '\t %%!\n' + fragment[1:]
37 fragment_2 = '\t %%!\n' + fragment[1:]
34 tokens_2 = [
38 tokens_2 = [
35 (Token.Text, '\t '),
39 (Token.Text, '\t '),
36 (Token.Operator, '%%!'),
40 (Token.Operator, '%%!'),
37 (Token.Text, '\n'),
41 (Token.Text, '\n'),
38 ] + tokens[1:]
42 ] + bash_tokens[1:]
39 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
43 assert tokens_2 == list(self.lexer.get_tokens(fragment_2))
40
44
41 fragment_2 = 'x = ' + fragment
45 fragment_2 = 'x = ' + fragment
42 tokens_2 = [
46 tokens_2 = [
@@ -44,8 +48,8 b' class TestLexers(TestCase):'
44 (Token.Text, ' '),
48 (Token.Text, ' '),
45 (Token.Operator, '='),
49 (Token.Operator, '='),
46 (Token.Text, ' '),
50 (Token.Text, ' '),
47 ] + tokens
51 ] + bash_tokens
48 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
52 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
49
53
50 fragment_2 = 'x, = ' + fragment
54 fragment_2 = 'x, = ' + fragment
51 tokens_2 = [
55 tokens_2 = [
@@ -54,8 +58,8 b' class TestLexers(TestCase):'
54 (Token.Text, ' '),
58 (Token.Text, ' '),
55 (Token.Operator, '='),
59 (Token.Operator, '='),
56 (Token.Text, ' '),
60 (Token.Text, ' '),
57 ] + tokens
61 ] + bash_tokens
58 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
62 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
59
63
60 fragment_2 = 'x, = %sx ' + fragment[1:]
64 fragment_2 = 'x, = %sx ' + fragment[1:]
61 tokens_2 = [
65 tokens_2 = [
@@ -67,8 +71,10 b' class TestLexers(TestCase):'
67 (Token.Operator, '%'),
71 (Token.Operator, '%'),
68 (Token.Keyword, 'sx'),
72 (Token.Keyword, 'sx'),
69 (Token.Text, ' '),
73 (Token.Text, ' '),
70 ] + tokens[1:]
74 ] + bash_tokens[1:]
71 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
75 if tokens_2[7] == (Token.Text, " ") and pyg214: # pygments 2.14+
76 tokens_2[7] = (Token.Text.Whitespace, " ")
77 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
72
78
73 fragment_2 = 'f = %R function () {}\n'
79 fragment_2 = 'f = %R function () {}\n'
74 tokens_2 = [
80 tokens_2 = [
@@ -80,7 +86,7 b' class TestLexers(TestCase):'
80 (Token.Keyword, 'R'),
86 (Token.Keyword, 'R'),
81 (Token.Text, ' function () {}\n'),
87 (Token.Text, ' function () {}\n'),
82 ]
88 ]
83 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
89 assert tokens_2 == list(self.lexer.get_tokens(fragment_2))
84
90
85 fragment_2 = '\t%%xyz\n$foo\n'
91 fragment_2 = '\t%%xyz\n$foo\n'
86 tokens_2 = [
92 tokens_2 = [
@@ -89,7 +95,7 b' class TestLexers(TestCase):'
89 (Token.Keyword, 'xyz'),
95 (Token.Keyword, 'xyz'),
90 (Token.Text, '\n$foo\n'),
96 (Token.Text, '\n$foo\n'),
91 ]
97 ]
92 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
98 assert tokens_2 == list(self.lexer.get_tokens(fragment_2))
93
99
94 fragment_2 = '%system?\n'
100 fragment_2 = '%system?\n'
95 tokens_2 = [
101 tokens_2 = [
@@ -98,7 +104,7 b' class TestLexers(TestCase):'
98 (Token.Operator, '?'),
104 (Token.Operator, '?'),
99 (Token.Text, '\n'),
105 (Token.Text, '\n'),
100 ]
106 ]
101 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
107 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
102
108
103 fragment_2 = 'x != y\n'
109 fragment_2 = 'x != y\n'
104 tokens_2 = [
110 tokens_2 = [
@@ -109,7 +115,7 b' class TestLexers(TestCase):'
109 (Token.Name, 'y'),
115 (Token.Name, 'y'),
110 (Token.Text, '\n'),
116 (Token.Text, '\n'),
111 ]
117 ]
112 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
118 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
113
119
114 fragment_2 = ' ?math.sin\n'
120 fragment_2 = ' ?math.sin\n'
115 tokens_2 = [
121 tokens_2 = [
@@ -118,7 +124,7 b' class TestLexers(TestCase):'
118 (Token.Text, 'math.sin'),
124 (Token.Text, 'math.sin'),
119 (Token.Text, '\n'),
125 (Token.Text, '\n'),
120 ]
126 ]
121 self.assertEqual(tokens_2, list(self.lexer.get_tokens(fragment_2)))
127 assert tokens_2[:-1] == list(self.lexer.get_tokens(fragment_2))[:-1]
122
128
123 fragment = ' *int*?\n'
129 fragment = ' *int*?\n'
124 tokens = [
130 tokens = [
@@ -126,7 +132,7 b' class TestLexers(TestCase):'
126 (Token.Operator, '?'),
132 (Token.Operator, '?'),
127 (Token.Text, '\n'),
133 (Token.Text, '\n'),
128 ]
134 ]
129 self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
135 assert tokens == list(self.lexer.get_tokens(fragment))
130
136
131 fragment = '%%writefile -a foo.py\nif a == b:\n pass'
137 fragment = '%%writefile -a foo.py\nif a == b:\n pass'
132 tokens = [
138 tokens = [
@@ -145,7 +151,9 b' class TestLexers(TestCase):'
145 (Token.Keyword, 'pass'),
151 (Token.Keyword, 'pass'),
146 (Token.Text, '\n'),
152 (Token.Text, '\n'),
147 ]
153 ]
148 self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
154 if tokens[10] == (Token.Text, "\n") and pyg214: # pygments 2.14+
155 tokens[10] = (Token.Text.Whitespace, "\n")
156 assert tokens[:-1] == list(self.lexer.get_tokens(fragment))[:-1]
149
157
150 fragment = '%%timeit\nmath.sin(0)'
158 fragment = '%%timeit\nmath.sin(0)'
151 tokens = [
159 tokens = [
@@ -173,4 +181,4 b' class TestLexers(TestCase):'
173 (Token.Punctuation, '>'),
181 (Token.Punctuation, '>'),
174 (Token.Text, '\n'),
182 (Token.Text, '\n'),
175 ]
183 ]
176 self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
184 assert tokens == list(self.lexer.get_tokens(fragment))
@@ -147,7 +147,7 b' class TerminalMagics(Magics):'
147
147
148 sentinel = opts.get('s', u'--')
148 sentinel = opts.get('s', u'--')
149 block = '\n'.join(get_pasted_lines(sentinel, quiet=quiet))
149 block = '\n'.join(get_pasted_lines(sentinel, quiet=quiet))
150 self.store_or_execute(block, name, store_history=False)
150 self.store_or_execute(block, name, store_history=True)
151
151
152 @line_magic
152 @line_magic
153 def paste(self, parameter_s=''):
153 def paste(self, parameter_s=''):
@@ -68,10 +68,15 b' def create_ipython_shortcuts(shell):'
68 reformat_text_before_cursor(event.current_buffer, event.current_buffer.document, shell)
68 reformat_text_before_cursor(event.current_buffer, event.current_buffer.document, shell)
69 event.current_buffer.validate_and_handle()
69 event.current_buffer.validate_and_handle()
70
70
71 kb.add('escape', 'enter', filter=(has_focus(DEFAULT_BUFFER)
71 @Condition
72 & ~has_selection
72 def ebivim():
73 & insert_mode
73 return shell.emacs_bindings_in_vi_insert_mode
74 ))(reformat_and_execute)
74
75 kb.add(
76 "escape",
77 "enter",
78 filter=(has_focus(DEFAULT_BUFFER) & ~has_selection & insert_mode & ebivim),
79 )(reformat_and_execute)
75
80
76 kb.add("c-\\")(quit)
81 kb.add("c-\\")(quit)
77
82
@@ -333,10 +338,6 b' def create_ipython_shortcuts(shell):'
333 if sys.platform == "win32":
338 if sys.platform == "win32":
334 kb.add("c-v", filter=(has_focus(DEFAULT_BUFFER) & ~vi_mode))(win_paste)
339 kb.add("c-v", filter=(has_focus(DEFAULT_BUFFER) & ~vi_mode))(win_paste)
335
340
336 @Condition
337 def ebivim():
338 return shell.emacs_bindings_in_vi_insert_mode
339
340 focused_insert_vi = has_focus(DEFAULT_BUFFER) & vi_insert_mode
341 focused_insert_vi = has_focus(DEFAULT_BUFFER) & vi_insert_mode
341
342
342 @kb.add("end", filter=has_focus(DEFAULT_BUFFER) & (ebivim | ~vi_insert_mode))
343 @kb.add("end", filter=has_focus(DEFAULT_BUFFER) & (ebivim | ~vi_insert_mode))
@@ -2,6 +2,44 b''
2 8.x Series
2 8.x Series
3 ============
3 ============
4
4
5 .. _version 8.8.0:
6
7 IPython 8.8.0
8 -------------
9
10 First release of IPython in 2023 as there was no release at the end of
11 December.
12
13 This is an unusually big release (relatively speaking) with more than 15 Pull
14 Requests merge.
15
16 Of particular interest are:
17
18 - :ghpull:`13852` that replace the greedy completer and improve
19 completion, in particular for dictionary keys.
20 - :ghpull:`13858` that adds ``py.typed`` to ``setup.cfg`` to make sure it is
21 bundled in wheels.
22 - :ghpull:`13869` that implements tab completions for IPython options in the
23 shell when using `argcomplete <https://github.com/kislyuk/argcomplete>`. I
24 believe this also needs a recent version of Traitlets.
25 - :ghpull:`13865` makes the ``inspector`` class of `InteractiveShell`
26 configurable.
27 - :ghpull:`13880` that remove minor-version entrypoints as the minor version
28 entry points that would be included in the wheel would be the one of the
29 Python version that was used to build the ``whl`` file.
30
31 In no particular order, the rest of the changes update the test suite to be
32 compatible with Pygments 2.14, various docfixes, testing on more recent python
33 versions and various updates.
34
35 As usual you can find the full list of PRs on GitHub under `the 8.8 milestone
36 <https://github.com/ipython/ipython/milestone/110>`__.
37
38 Many thanks to @krassowski for the many PRs and @jasongrout for reviewing and
39 merging contributions.
40
41 Thanks to the `D. E. Shaw group <https://deshaw.com/>`__ for sponsoring
42 work on IPython and related libraries.
5
43
6 .. _version 8.7.0:
44 .. _version 8.7.0:
7
45
@@ -100,17 +100,12 b' exclude ='
100 setupext
100 setupext
101
101
102 [options.package_data]
102 [options.package_data]
103 IPython = py.typed
103 IPython.core = profile/README*
104 IPython.core = profile/README*
104 IPython.core.tests = *.png, *.jpg, daft_extension/*.py
105 IPython.core.tests = *.png, *.jpg, daft_extension/*.py
105 IPython.lib.tests = *.wav
106 IPython.lib.tests = *.wav
106 IPython.testing.plugin = *.txt
107 IPython.testing.plugin = *.txt
107
108
108 [options.entry_points]
109 pygments.lexers =
110 ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer
111 ipython = IPython.lib.lexers:IPythonLexer
112 ipython3 = IPython.lib.lexers:IPython3Lexer
113
114 [velin]
109 [velin]
115 ignore_patterns =
110 ignore_patterns =
116 IPython/core/tests
111 IPython/core/tests
@@ -139,7 +139,15 b" setup_args['cmdclass'] = {"
139 'install_scripts_sym': install_scripts_for_symlink,
139 'install_scripts_sym': install_scripts_for_symlink,
140 'unsymlink': unsymlink,
140 'unsymlink': unsymlink,
141 }
141 }
142 setup_args["entry_points"] = {"console_scripts": find_entry_points()}
142
143 setup_args["entry_points"] = {
144 "console_scripts": find_entry_points(),
145 "pygments.lexers": [
146 "ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer",
147 "ipython = IPython.lib.lexers:IPythonLexer",
148 "ipython3 = IPython.lib.lexers:IPython3Lexer",
149 ],
150 }
143
151
144 #---------------------------------------------------------------------------
152 #---------------------------------------------------------------------------
145 # Do the actual setup now
153 # Do the actual setup now
@@ -211,20 +211,15 b' def find_entry_points():'
211 use, our own build_scripts_entrypt class below parses these and builds
211 use, our own build_scripts_entrypt class below parses these and builds
212 command line scripts.
212 command line scripts.
213
213
214 Each of our entry points gets a plain name, e.g. ipython, a name
214 Each of our entry points gets a plain name, e.g. ipython, and a name
215 suffixed with the Python major version number, e.g. ipython3, and
215 suffixed with the Python major version number, e.g. ipython3.
216 a name suffixed with the Python major.minor version number, eg. ipython3.8.
217 """
216 """
218 ep = [
217 ep = [
219 'ipython%s = IPython:start_ipython',
218 'ipython%s = IPython:start_ipython',
220 ]
219 ]
221 major_suffix = str(sys.version_info[0])
220 major_suffix = str(sys.version_info[0])
222 minor_suffix = ".".join([str(sys.version_info[0]), str(sys.version_info[1])])
221 return [e % "" for e in ep] + [e % major_suffix for e in ep]
223 return (
222
224 [e % "" for e in ep]
225 + [e % major_suffix for e in ep]
226 + [e % minor_suffix for e in ep]
227 )
228
223
229 class install_lib_symlink(Command):
224 class install_lib_symlink(Command):
230 user_options = [
225 user_options = [
@@ -2,15 +2,6 b''
2 # when releasing with bash, simple source it to get asked questions.
2 # when releasing with bash, simple source it to get asked questions.
3
3
4 # misc check before starting
4 # misc check before starting
5
6 python -c 'import keyring'
7 python -c 'import twine'
8 python -c 'import sphinx'
9 python -c 'import sphinx_rtd_theme'
10 python -c 'import pytest'
11 python -c 'import build'
12
13
14 BLACK=$(tput setaf 1)
5 BLACK=$(tput setaf 1)
15 RED=$(tput setaf 1)
6 RED=$(tput setaf 1)
16 GREEN=$(tput setaf 2)
7 GREEN=$(tput setaf 2)
@@ -22,6 +13,22 b' WHITE=$(tput setaf 7)'
22 NOR=$(tput sgr0)
13 NOR=$(tput sgr0)
23
14
24
15
16 echo "Checking all tools are installed..."
17
18 python -c 'import keyring'
19 python -c 'import twine'
20 python -c 'import sphinx'
21 python -c 'import sphinx_rtd_theme'
22 python -c 'import pytest'
23 python -c 'import build'
24 # those are necessary fo building the docs
25 echo "Checking imports for docs"
26 python -c 'import numpy'
27 python -c 'import matplotlib'
28
29
30
31
25 echo "Will use $BLUE'$EDITOR'$NOR to edit files when necessary"
32 echo "Will use $BLUE'$EDITOR'$NOR to edit files when necessary"
26 echo -n "PREV_RELEASE (X.y.z) [$PREV_RELEASE]: "
33 echo -n "PREV_RELEASE (X.y.z) [$PREV_RELEASE]: "
27 read input
34 read input
General Comments 0
You need to be logged in to leave comments. Login now