@@ -0,0 +1,738 @@
|
1 | from typing import ( | |
|
2 | Any, | |
|
3 | Callable, | |
|
4 | Dict, | |
|
5 | Set, | |
|
6 | Sequence, | |
|
7 | Tuple, | |
|
8 | NamedTuple, | |
|
9 | Type, | |
|
10 | Literal, | |
|
11 | Union, | |
|
12 | TYPE_CHECKING, | |
|
13 | ) | |
|
14 | import ast | |
|
15 | import builtins | |
|
16 | import collections | |
|
17 | import operator | |
|
18 | import sys | |
|
19 | from functools import cached_property | |
|
20 | from dataclasses import dataclass, field | |
|
21 | ||
|
22 | from IPython.utils.docs import GENERATING_DOCUMENTATION | |
|
23 | from IPython.utils.decorators import undoc | |
|
24 | ||
|
25 | ||
|
26 | if TYPE_CHECKING or GENERATING_DOCUMENTATION: | |
|
27 | from typing_extensions import Protocol | |
|
28 | else: | |
|
29 | # do not require at runtime | |
|
30 | Protocol = object # requires Python >=3.8 | |
|
31 | ||
|
32 | ||
|
33 | @undoc | |
|
34 | class HasGetItem(Protocol): | |
|
35 | def __getitem__(self, key) -> None: | |
|
36 | ... | |
|
37 | ||
|
38 | ||
|
39 | @undoc | |
|
40 | class InstancesHaveGetItem(Protocol): | |
|
41 | def __call__(self, *args, **kwargs) -> HasGetItem: | |
|
42 | ... | |
|
43 | ||
|
44 | ||
|
45 | @undoc | |
|
46 | class HasGetAttr(Protocol): | |
|
47 | def __getattr__(self, key) -> None: | |
|
48 | ... | |
|
49 | ||
|
50 | ||
|
51 | @undoc | |
|
52 | class DoesNotHaveGetAttr(Protocol): | |
|
53 | pass | |
|
54 | ||
|
55 | ||
|
56 | # By default `__getattr__` is not explicitly implemented on most objects | |
|
57 | MayHaveGetattr = Union[HasGetAttr, DoesNotHaveGetAttr] | |
|
58 | ||
|
59 | ||
|
60 | def _unbind_method(func: Callable) -> Union[Callable, None]: | |
|
61 | """Get unbound method for given bound method. | |
|
62 | ||
|
63 | Returns None if the unbound method cannot be obtained, or if the method is already unbound. | |
|
64 | """ | |
|
65 | owner = getattr(func, "__self__", None) | |
|
66 | owner_class = type(owner) | |
|
67 | name = getattr(func, "__name__", None) | |
|
68 | instance_dict_overrides = getattr(owner, "__dict__", None) | |
|
69 | if ( | |
|
70 | owner is not None | |
|
71 | and name | |
|
72 | and ( | |
|
73 | not instance_dict_overrides | |
|
74 | or (instance_dict_overrides and name not in instance_dict_overrides) | |
|
75 | ) | |
|
76 | ): | |
|
77 | return getattr(owner_class, name) | |
|
78 | return None | |
|
79 | ||
|
80 | ||
|
81 | @undoc | |
|
82 | @dataclass | |
|
83 | class EvaluationPolicy: | |
|
84 | """Definition of evaluation policy.""" | |
|
85 | ||
|
86 | allow_locals_access: bool = False | |
|
87 | allow_globals_access: bool = False | |
|
88 | allow_item_access: bool = False | |
|
89 | allow_attr_access: bool = False | |
|
90 | allow_builtins_access: bool = False | |
|
91 | allow_all_operations: bool = False | |
|
92 | allow_any_calls: bool = False | |
|
93 | allowed_calls: Set[Callable] = field(default_factory=set) | |
|
94 | ||
|
95 | def can_get_item(self, value, item): | |
|
96 | return self.allow_item_access | |
|
97 | ||
|
98 | def can_get_attr(self, value, attr): | |
|
99 | return self.allow_attr_access | |
|
100 | ||
|
101 | def can_operate(self, dunders: Tuple[str, ...], a, b=None): | |
|
102 | if self.allow_all_operations: | |
|
103 | return True | |
|
104 | ||
|
105 | def can_call(self, func): | |
|
106 | if self.allow_any_calls: | |
|
107 | return True | |
|
108 | ||
|
109 | if func in self.allowed_calls: | |
|
110 | return True | |
|
111 | ||
|
112 | owner_method = _unbind_method(func) | |
|
113 | ||
|
114 | if owner_method and owner_method in self.allowed_calls: | |
|
115 | return True | |
|
116 | ||
|
117 | ||
|
118 | def _get_external(module_name: str, access_path: Sequence[str]): | |
|
119 | """Get value from external module given a dotted access path. | |
|
120 | ||
|
121 | Raises: | |
|
122 | * `KeyError` if module is removed or not found, and | |
|
123 | * `AttributeError` if the access path does not match an exported object | |
|
124 | """ | |
|
125 | member_type = sys.modules[module_name] | |
|
126 | for attr in access_path: | |
|
127 | member_type = getattr(member_type, attr) | |
|
128 | return member_type | |
|
129 | ||
|
130 | ||
|
131 | def _has_original_dunder_external( | |
|
132 | value, | |
|
133 | module_name: str, | |
|
134 | access_path: Sequence[str], | |
|
135 | method_name: str, | |
|
136 | ): | |
|
137 | if module_name not in sys.modules: | |
|
138 | # LBYL (look before you leap) as it is faster | |
|
139 | return False | |
|
140 | try: | |
|
141 | member_type = _get_external(module_name, access_path) | |
|
142 | value_type = type(value) | |
|
143 | if type(value) == member_type: | |
|
144 | return True | |
|
145 | if method_name == "__getattribute__": | |
|
146 | # we have to short-circuit here due to an unresolved issue in | |
|
147 | # `isinstance` implementation: https://bugs.python.org/issue32683 | |
|
148 | return False | |
|
149 | if isinstance(value, member_type): | |
|
150 | method = getattr(value_type, method_name, None) | |
|
151 | member_method = getattr(member_type, method_name, None) | |
|
152 | if member_method == method: | |
|
153 | return True | |
|
154 | except (AttributeError, KeyError): | |
|
155 | return False | |
|
156 | ||
|
157 | ||
|
158 | def _has_original_dunder( | |
|
159 | value, allowed_types, allowed_methods, allowed_external, method_name | |
|
160 | ): | |
|
161 | # note: Python ignores `__getattr__`/`__getitem__` on instances, | |
|
162 | # we only need to check at class level | |
|
163 | value_type = type(value) | |
|
164 | ||
|
165 | # strict type check passes → no need to check method | |
|
166 | if value_type in allowed_types: | |
|
167 | return True | |
|
168 | ||
|
169 | method = getattr(value_type, method_name, None) | |
|
170 | ||
|
171 | if method is None: | |
|
172 | return None | |
|
173 | ||
|
174 | if method in allowed_methods: | |
|
175 | return True | |
|
176 | ||
|
177 | for module_name, *access_path in allowed_external: | |
|
178 | if _has_original_dunder_external(value, module_name, access_path, method_name): | |
|
179 | return True | |
|
180 | ||
|
181 | return False | |
|
182 | ||
|
183 | ||
|
184 | @undoc | |
|
185 | @dataclass | |
|
186 | class SelectivePolicy(EvaluationPolicy): | |
|
187 | allowed_getitem: Set[InstancesHaveGetItem] = field(default_factory=set) | |
|
188 | allowed_getitem_external: Set[Tuple[str, ...]] = field(default_factory=set) | |
|
189 | ||
|
190 | allowed_getattr: Set[MayHaveGetattr] = field(default_factory=set) | |
|
191 | allowed_getattr_external: Set[Tuple[str, ...]] = field(default_factory=set) | |
|
192 | ||
|
193 | allowed_operations: Set = field(default_factory=set) | |
|
194 | allowed_operations_external: Set[Tuple[str, ...]] = field(default_factory=set) | |
|
195 | ||
|
196 | _operation_methods_cache: Dict[str, Set[Callable]] = field( | |
|
197 | default_factory=dict, init=False | |
|
198 | ) | |
|
199 | ||
|
200 | def can_get_attr(self, value, attr): | |
|
201 | has_original_attribute = _has_original_dunder( | |
|
202 | value, | |
|
203 | allowed_types=self.allowed_getattr, | |
|
204 | allowed_methods=self._getattribute_methods, | |
|
205 | allowed_external=self.allowed_getattr_external, | |
|
206 | method_name="__getattribute__", | |
|
207 | ) | |
|
208 | has_original_attr = _has_original_dunder( | |
|
209 | value, | |
|
210 | allowed_types=self.allowed_getattr, | |
|
211 | allowed_methods=self._getattr_methods, | |
|
212 | allowed_external=self.allowed_getattr_external, | |
|
213 | method_name="__getattr__", | |
|
214 | ) | |
|
215 | ||
|
216 | accept = False | |
|
217 | ||
|
218 | # Many objects do not have `__getattr__`, this is fine. | |
|
219 | if has_original_attr is None and has_original_attribute: | |
|
220 | accept = True | |
|
221 | else: | |
|
222 | # Accept objects without modifications to `__getattr__` and `__getattribute__` | |
|
223 | accept = has_original_attr and has_original_attribute | |
|
224 | ||
|
225 | if accept: | |
|
226 | # We still need to check for overridden properties. | |
|
227 | ||
|
228 | value_class = type(value) | |
|
229 | if not hasattr(value_class, attr): | |
|
230 | return True | |
|
231 | ||
|
232 | class_attr_val = getattr(value_class, attr) | |
|
233 | is_property = isinstance(class_attr_val, property) | |
|
234 | ||
|
235 | if not is_property: | |
|
236 | return True | |
|
237 | ||
|
238 | # Properties in allowed types are ok (although we do not include any | |
|
239 | # properties in our default allow list currently). | |
|
240 | if type(value) in self.allowed_getattr: | |
|
241 | return True # pragma: no cover | |
|
242 | ||
|
243 | # Properties in subclasses of allowed types may be ok if not changed | |
|
244 | for module_name, *access_path in self.allowed_getattr_external: | |
|
245 | try: | |
|
246 | external_class = _get_external(module_name, access_path) | |
|
247 | external_class_attr_val = getattr(external_class, attr) | |
|
248 | except (KeyError, AttributeError): | |
|
249 | return False # pragma: no cover | |
|
250 | return class_attr_val == external_class_attr_val | |
|
251 | ||
|
252 | return False | |
|
253 | ||
|
254 | def can_get_item(self, value, item): | |
|
255 | """Allow accessing `__getiitem__` of allow-listed instances unless it was not modified.""" | |
|
256 | return _has_original_dunder( | |
|
257 | value, | |
|
258 | allowed_types=self.allowed_getitem, | |
|
259 | allowed_methods=self._getitem_methods, | |
|
260 | allowed_external=self.allowed_getitem_external, | |
|
261 | method_name="__getitem__", | |
|
262 | ) | |
|
263 | ||
|
264 | def can_operate(self, dunders: Tuple[str, ...], a, b=None): | |
|
265 | objects = [a] | |
|
266 | if b is not None: | |
|
267 | objects.append(b) | |
|
268 | return all( | |
|
269 | [ | |
|
270 | _has_original_dunder( | |
|
271 | obj, | |
|
272 | allowed_types=self.allowed_operations, | |
|
273 | allowed_methods=self._operator_dunder_methods(dunder), | |
|
274 | allowed_external=self.allowed_operations_external, | |
|
275 | method_name=dunder, | |
|
276 | ) | |
|
277 | for dunder in dunders | |
|
278 | for obj in objects | |
|
279 | ] | |
|
280 | ) | |
|
281 | ||
|
282 | def _operator_dunder_methods(self, dunder: str) -> Set[Callable]: | |
|
283 | if dunder not in self._operation_methods_cache: | |
|
284 | self._operation_methods_cache[dunder] = self._safe_get_methods( | |
|
285 | self.allowed_operations, dunder | |
|
286 | ) | |
|
287 | return self._operation_methods_cache[dunder] | |
|
288 | ||
|
289 | @cached_property | |
|
290 | def _getitem_methods(self) -> Set[Callable]: | |
|
291 | return self._safe_get_methods(self.allowed_getitem, "__getitem__") | |
|
292 | ||
|
293 | @cached_property | |
|
294 | def _getattr_methods(self) -> Set[Callable]: | |
|
295 | return self._safe_get_methods(self.allowed_getattr, "__getattr__") | |
|
296 | ||
|
297 | @cached_property | |
|
298 | def _getattribute_methods(self) -> Set[Callable]: | |
|
299 | return self._safe_get_methods(self.allowed_getattr, "__getattribute__") | |
|
300 | ||
|
301 | def _safe_get_methods(self, classes, name) -> Set[Callable]: | |
|
302 | return { | |
|
303 | method | |
|
304 | for class_ in classes | |
|
305 | for method in [getattr(class_, name, None)] | |
|
306 | if method | |
|
307 | } | |
|
308 | ||
|
309 | ||
|
310 | class _DummyNamedTuple(NamedTuple): | |
|
311 | """Used internally to retrieve methods of named tuple instance.""" | |
|
312 | ||
|
313 | ||
|
314 | class EvaluationContext(NamedTuple): | |
|
315 | #: Local namespace | |
|
316 | locals: dict | |
|
317 | #: Global namespace | |
|
318 | globals: dict | |
|
319 | #: Evaluation policy identifier | |
|
320 | evaluation: Literal[ | |
|
321 | "forbidden", "minimal", "limited", "unsafe", "dangerous" | |
|
322 | ] = "forbidden" | |
|
323 | #: Whether the evaluation of code takes place inside a subscript. | |
|
324 | #: Useful for evaluating ``:-1, 'col'`` in ``df[:-1, 'col']``. | |
|
325 | in_subscript: bool = False | |
|
326 | ||
|
327 | ||
|
328 | class _IdentitySubscript: | |
|
329 | """Returns the key itself when item is requested via subscript.""" | |
|
330 | ||
|
331 | def __getitem__(self, key): | |
|
332 | return key | |
|
333 | ||
|
334 | ||
|
335 | IDENTITY_SUBSCRIPT = _IdentitySubscript() | |
|
336 | SUBSCRIPT_MARKER = "__SUBSCRIPT_SENTINEL__" | |
|
337 | ||
|
338 | ||
|
339 | class GuardRejection(Exception): | |
|
340 | """Exception raised when guard rejects evaluation attempt.""" | |
|
341 | ||
|
342 | pass | |
|
343 | ||
|
344 | ||
|
345 | def guarded_eval(code: str, context: EvaluationContext): | |
|
346 | """Evaluate provided code in the evaluation context. | |
|
347 | ||
|
348 | If the evaluation policy given by the context is set to ``forbidden``, | |
|
349 | no evaluation will be performed; if it is set to ``dangerous``, | |
|
350 | standard :func:`eval` will be used; finally, for any other policy, | |
|
351 | :func:`eval_node` will be called on the parsed AST. | |
|
352 | """ | |
|
353 | locals_ = context.locals | |
|
354 | ||
|
355 | if context.evaluation == "forbidden": | |
|
356 | raise GuardRejection("Forbidden mode") | |
|
357 | ||
|
358 | # note: not using `ast.literal_eval` as it does not implement | |
|
359 | # getitem at all, for example it fails on simple `[0][1]` | |
|
360 | ||
|
361 | if context.in_subscript: | |
|
362 | # syntactic sugar for slicing (:) is only available in subscripts | |
|
363 | # so we need to trick the ast parser into thinking that we have | |
|
364 | # a subscript, but we need to be able to later recognise that we did | |
|
365 | # it so we can ignore the actual __getitem__ operation | |
|
366 | if not code: | |
|
367 | return tuple() | |
|
368 | locals_ = locals_.copy() | |
|
369 | locals_[SUBSCRIPT_MARKER] = IDENTITY_SUBSCRIPT | |
|
370 | code = SUBSCRIPT_MARKER + "[" + code + "]" | |
|
371 | context = EvaluationContext(**{**context._asdict(), **{"locals": locals_}}) | |
|
372 | ||
|
373 | if context.evaluation == "dangerous": | |
|
374 | return eval(code, context.globals, context.locals) | |
|
375 | ||
|
376 | expression = ast.parse(code, mode="eval") | |
|
377 | ||
|
378 | return eval_node(expression, context) | |
|
379 | ||
|
380 | ||
|
381 | BINARY_OP_DUNDERS: Dict[Type[ast.operator], Tuple[str]] = { | |
|
382 | ast.Add: ("__add__",), | |
|
383 | ast.Sub: ("__sub__",), | |
|
384 | ast.Mult: ("__mul__",), | |
|
385 | ast.Div: ("__truediv__",), | |
|
386 | ast.FloorDiv: ("__floordiv__",), | |
|
387 | ast.Mod: ("__mod__",), | |
|
388 | ast.Pow: ("__pow__",), | |
|
389 | ast.LShift: ("__lshift__",), | |
|
390 | ast.RShift: ("__rshift__",), | |
|
391 | ast.BitOr: ("__or__",), | |
|
392 | ast.BitXor: ("__xor__",), | |
|
393 | ast.BitAnd: ("__and__",), | |
|
394 | ast.MatMult: ("__matmul__",), | |
|
395 | } | |
|
396 | ||
|
397 | COMP_OP_DUNDERS: Dict[Type[ast.cmpop], Tuple[str, ...]] = { | |
|
398 | ast.Eq: ("__eq__",), | |
|
399 | ast.NotEq: ("__ne__", "__eq__"), | |
|
400 | ast.Lt: ("__lt__", "__gt__"), | |
|
401 | ast.LtE: ("__le__", "__ge__"), | |
|
402 | ast.Gt: ("__gt__", "__lt__"), | |
|
403 | ast.GtE: ("__ge__", "__le__"), | |
|
404 | ast.In: ("__contains__",), | |
|
405 | # Note: ast.Is, ast.IsNot, ast.NotIn are handled specially | |
|
406 | } | |
|
407 | ||
|
408 | UNARY_OP_DUNDERS: Dict[Type[ast.unaryop], Tuple[str, ...]] = { | |
|
409 | ast.USub: ("__neg__",), | |
|
410 | ast.UAdd: ("__pos__",), | |
|
411 | # we have to check both __inv__ and __invert__! | |
|
412 | ast.Invert: ("__invert__", "__inv__"), | |
|
413 | ast.Not: ("__not__",), | |
|
414 | } | |
|
415 | ||
|
416 | ||
|
417 | def _find_dunder(node_op, dunders) -> Union[Tuple[str, ...], None]: | |
|
418 | dunder = None | |
|
419 | for op, candidate_dunder in dunders.items(): | |
|
420 | if isinstance(node_op, op): | |
|
421 | dunder = candidate_dunder | |
|
422 | return dunder | |
|
423 | ||
|
424 | ||
|
425 | def eval_node(node: Union[ast.AST, None], context: EvaluationContext): | |
|
426 | """Evaluate AST node in provided context. | |
|
427 | ||
|
428 | Applies evaluation restrictions defined in the context. Currently does not support evaluation of functions with keyword arguments. | |
|
429 | ||
|
430 | Does not evaluate actions that always have side effects: | |
|
431 | ||
|
432 | - class definitions (``class sth: ...``) | |
|
433 | - function definitions (``def sth: ...``) | |
|
434 | - variable assignments (``x = 1``) | |
|
435 | - augmented assignments (``x += 1``) | |
|
436 | - deletions (``del x``) | |
|
437 | ||
|
438 | Does not evaluate operations which do not return values: | |
|
439 | ||
|
440 | - assertions (``assert x``) | |
|
441 | - pass (``pass``) | |
|
442 | - imports (``import x``) | |
|
443 | - control flow: | |
|
444 | ||
|
445 | - conditionals (``if x:``) except for ternary IfExp (``a if x else b``) | |
|
446 | - loops (``for`` and ``while``) | |
|
447 | - exception handling | |
|
448 | ||
|
449 | The purpose of this function is to guard against unwanted side-effects; | |
|
450 | it does not give guarantees on protection from malicious code execution. | |
|
451 | """ | |
|
452 | policy = EVALUATION_POLICIES[context.evaluation] | |
|
453 | if node is None: | |
|
454 | return None | |
|
455 | if isinstance(node, ast.Expression): | |
|
456 | return eval_node(node.body, context) | |
|
457 | if isinstance(node, ast.BinOp): | |
|
458 | left = eval_node(node.left, context) | |
|
459 | right = eval_node(node.right, context) | |
|
460 | dunders = _find_dunder(node.op, BINARY_OP_DUNDERS) | |
|
461 | if dunders: | |
|
462 | if policy.can_operate(dunders, left, right): | |
|
463 | return getattr(left, dunders[0])(right) | |
|
464 | else: | |
|
465 | raise GuardRejection( | |
|
466 | f"Operation (`{dunders}`) for", | |
|
467 | type(left), | |
|
468 | f"not allowed in {context.evaluation} mode", | |
|
469 | ) | |
|
470 | if isinstance(node, ast.Compare): | |
|
471 | left = eval_node(node.left, context) | |
|
472 | all_true = True | |
|
473 | negate = False | |
|
474 | for op, right in zip(node.ops, node.comparators): | |
|
475 | right = eval_node(right, context) | |
|
476 | dunder = None | |
|
477 | dunders = _find_dunder(op, COMP_OP_DUNDERS) | |
|
478 | if not dunders: | |
|
479 | if isinstance(op, ast.NotIn): | |
|
480 | dunders = COMP_OP_DUNDERS[ast.In] | |
|
481 | negate = True | |
|
482 | if isinstance(op, ast.Is): | |
|
483 | dunder = "is_" | |
|
484 | if isinstance(op, ast.IsNot): | |
|
485 | dunder = "is_" | |
|
486 | negate = True | |
|
487 | if not dunder and dunders: | |
|
488 | dunder = dunders[0] | |
|
489 | if dunder: | |
|
490 | a, b = (right, left) if dunder == "__contains__" else (left, right) | |
|
491 | if dunder == "is_" or dunders and policy.can_operate(dunders, a, b): | |
|
492 | result = getattr(operator, dunder)(a, b) | |
|
493 | if negate: | |
|
494 | result = not result | |
|
495 | if not result: | |
|
496 | all_true = False | |
|
497 | left = right | |
|
498 | else: | |
|
499 | raise GuardRejection( | |
|
500 | f"Comparison (`{dunder}`) for", | |
|
501 | type(left), | |
|
502 | f"not allowed in {context.evaluation} mode", | |
|
503 | ) | |
|
504 | else: | |
|
505 | raise ValueError( | |
|
506 | f"Comparison `{dunder}` not supported" | |
|
507 | ) # pragma: no cover | |
|
508 | return all_true | |
|
509 | if isinstance(node, ast.Constant): | |
|
510 | return node.value | |
|
511 | if isinstance(node, ast.Index): | |
|
512 | # deprecated since Python 3.9 | |
|
513 | return eval_node(node.value, context) # pragma: no cover | |
|
514 | if isinstance(node, ast.Tuple): | |
|
515 | return tuple(eval_node(e, context) for e in node.elts) | |
|
516 | if isinstance(node, ast.List): | |
|
517 | return [eval_node(e, context) for e in node.elts] | |
|
518 | if isinstance(node, ast.Set): | |
|
519 | return {eval_node(e, context) for e in node.elts} | |
|
520 | if isinstance(node, ast.Dict): | |
|
521 | return dict( | |
|
522 | zip( | |
|
523 | [eval_node(k, context) for k in node.keys], | |
|
524 | [eval_node(v, context) for v in node.values], | |
|
525 | ) | |
|
526 | ) | |
|
527 | if isinstance(node, ast.Slice): | |
|
528 | return slice( | |
|
529 | eval_node(node.lower, context), | |
|
530 | eval_node(node.upper, context), | |
|
531 | eval_node(node.step, context), | |
|
532 | ) | |
|
533 | if isinstance(node, ast.ExtSlice): | |
|
534 | # deprecated since Python 3.9 | |
|
535 | return tuple([eval_node(dim, context) for dim in node.dims]) # pragma: no cover | |
|
536 | if isinstance(node, ast.UnaryOp): | |
|
537 | value = eval_node(node.operand, context) | |
|
538 | dunders = _find_dunder(node.op, UNARY_OP_DUNDERS) | |
|
539 | if dunders: | |
|
540 | if policy.can_operate(dunders, value): | |
|
541 | return getattr(value, dunders[0])() | |
|
542 | else: | |
|
543 | raise GuardRejection( | |
|
544 | f"Operation (`{dunders}`) for", | |
|
545 | type(value), | |
|
546 | f"not allowed in {context.evaluation} mode", | |
|
547 | ) | |
|
548 | if isinstance(node, ast.Subscript): | |
|
549 | value = eval_node(node.value, context) | |
|
550 | slice_ = eval_node(node.slice, context) | |
|
551 | if policy.can_get_item(value, slice_): | |
|
552 | return value[slice_] | |
|
553 | raise GuardRejection( | |
|
554 | "Subscript access (`__getitem__`) for", | |
|
555 | type(value), # not joined to avoid calling `repr` | |
|
556 | f" not allowed in {context.evaluation} mode", | |
|
557 | ) | |
|
558 | if isinstance(node, ast.Name): | |
|
559 | if policy.allow_locals_access and node.id in context.locals: | |
|
560 | return context.locals[node.id] | |
|
561 | if policy.allow_globals_access and node.id in context.globals: | |
|
562 | return context.globals[node.id] | |
|
563 | if policy.allow_builtins_access and hasattr(builtins, node.id): | |
|
564 | # note: do not use __builtins__, it is an implementation detail of CPython | |
|
565 | return getattr(builtins, node.id) | |
|
566 | if not policy.allow_globals_access and not policy.allow_locals_access: | |
|
567 | raise GuardRejection( | |
|
568 | f"Namespace access not allowed in {context.evaluation} mode" | |
|
569 | ) | |
|
570 | else: | |
|
571 | raise NameError(f"{node.id} not found in locals, globals, nor builtins") | |
|
572 | if isinstance(node, ast.Attribute): | |
|
573 | value = eval_node(node.value, context) | |
|
574 | if policy.can_get_attr(value, node.attr): | |
|
575 | return getattr(value, node.attr) | |
|
576 | raise GuardRejection( | |
|
577 | "Attribute access (`__getattr__`) for", | |
|
578 | type(value), # not joined to avoid calling `repr` | |
|
579 | f"not allowed in {context.evaluation} mode", | |
|
580 | ) | |
|
581 | if isinstance(node, ast.IfExp): | |
|
582 | test = eval_node(node.test, context) | |
|
583 | if test: | |
|
584 | return eval_node(node.body, context) | |
|
585 | else: | |
|
586 | return eval_node(node.orelse, context) | |
|
587 | if isinstance(node, ast.Call): | |
|
588 | func = eval_node(node.func, context) | |
|
589 | if policy.can_call(func) and not node.keywords: | |
|
590 | args = [eval_node(arg, context) for arg in node.args] | |
|
591 | return func(*args) | |
|
592 | raise GuardRejection( | |
|
593 | "Call for", | |
|
594 | func, # not joined to avoid calling `repr` | |
|
595 | f"not allowed in {context.evaluation} mode", | |
|
596 | ) | |
|
597 | raise ValueError("Unhandled node", ast.dump(node)) | |
|
598 | ||
|
599 | ||
|
600 | SUPPORTED_EXTERNAL_GETITEM = { | |
|
601 | ("pandas", "core", "indexing", "_iLocIndexer"), | |
|
602 | ("pandas", "core", "indexing", "_LocIndexer"), | |
|
603 | ("pandas", "DataFrame"), | |
|
604 | ("pandas", "Series"), | |
|
605 | ("numpy", "ndarray"), | |
|
606 | ("numpy", "void"), | |
|
607 | } | |
|
608 | ||
|
609 | ||
|
610 | BUILTIN_GETITEM: Set[InstancesHaveGetItem] = { | |
|
611 | dict, | |
|
612 | str, | |
|
613 | bytes, | |
|
614 | list, | |
|
615 | tuple, | |
|
616 | collections.defaultdict, | |
|
617 | collections.deque, | |
|
618 | collections.OrderedDict, | |
|
619 | collections.ChainMap, | |
|
620 | collections.UserDict, | |
|
621 | collections.UserList, | |
|
622 | collections.UserString, | |
|
623 | _DummyNamedTuple, | |
|
624 | _IdentitySubscript, | |
|
625 | } | |
|
626 | ||
|
627 | ||
|
628 | def _list_methods(cls, source=None): | |
|
629 | """For use on immutable objects or with methods returning a copy""" | |
|
630 | return [getattr(cls, k) for k in (source if source else dir(cls))] | |
|
631 | ||
|
632 | ||
|
633 | dict_non_mutating_methods = ("copy", "keys", "values", "items") | |
|
634 | list_non_mutating_methods = ("copy", "index", "count") | |
|
635 | set_non_mutating_methods = set(dir(set)) & set(dir(frozenset)) | |
|
636 | ||
|
637 | ||
|
638 | dict_keys: Type[collections.abc.KeysView] = type({}.keys()) | |
|
639 | method_descriptor: Any = type(list.copy) | |
|
640 | ||
|
641 | NUMERICS = {int, float, complex} | |
|
642 | ||
|
643 | ALLOWED_CALLS = { | |
|
644 | bytes, | |
|
645 | *_list_methods(bytes), | |
|
646 | dict, | |
|
647 | *_list_methods(dict, dict_non_mutating_methods), | |
|
648 | dict_keys.isdisjoint, | |
|
649 | list, | |
|
650 | *_list_methods(list, list_non_mutating_methods), | |
|
651 | set, | |
|
652 | *_list_methods(set, set_non_mutating_methods), | |
|
653 | frozenset, | |
|
654 | *_list_methods(frozenset), | |
|
655 | range, | |
|
656 | str, | |
|
657 | *_list_methods(str), | |
|
658 | tuple, | |
|
659 | *_list_methods(tuple), | |
|
660 | *NUMERICS, | |
|
661 | *[method for numeric_cls in NUMERICS for method in _list_methods(numeric_cls)], | |
|
662 | collections.deque, | |
|
663 | *_list_methods(collections.deque, list_non_mutating_methods), | |
|
664 | collections.defaultdict, | |
|
665 | *_list_methods(collections.defaultdict, dict_non_mutating_methods), | |
|
666 | collections.OrderedDict, | |
|
667 | *_list_methods(collections.OrderedDict, dict_non_mutating_methods), | |
|
668 | collections.UserDict, | |
|
669 | *_list_methods(collections.UserDict, dict_non_mutating_methods), | |
|
670 | collections.UserList, | |
|
671 | *_list_methods(collections.UserList, list_non_mutating_methods), | |
|
672 | collections.UserString, | |
|
673 | *_list_methods(collections.UserString, dir(str)), | |
|
674 | collections.Counter, | |
|
675 | *_list_methods(collections.Counter, dict_non_mutating_methods), | |
|
676 | collections.Counter.elements, | |
|
677 | collections.Counter.most_common, | |
|
678 | } | |
|
679 | ||
|
680 | BUILTIN_GETATTR: Set[MayHaveGetattr] = { | |
|
681 | *BUILTIN_GETITEM, | |
|
682 | set, | |
|
683 | frozenset, | |
|
684 | object, | |
|
685 | type, # `type` handles a lot of generic cases, e.g. numbers as in `int.real`. | |
|
686 | *NUMERICS, | |
|
687 | dict_keys, | |
|
688 | method_descriptor, | |
|
689 | } | |
|
690 | ||
|
691 | ||
|
692 | BUILTIN_OPERATIONS = {*BUILTIN_GETATTR} | |
|
693 | ||
|
694 | EVALUATION_POLICIES = { | |
|
695 | "minimal": EvaluationPolicy( | |
|
696 | allow_builtins_access=True, | |
|
697 | allow_locals_access=False, | |
|
698 | allow_globals_access=False, | |
|
699 | allow_item_access=False, | |
|
700 | allow_attr_access=False, | |
|
701 | allowed_calls=set(), | |
|
702 | allow_any_calls=False, | |
|
703 | allow_all_operations=False, | |
|
704 | ), | |
|
705 | "limited": SelectivePolicy( | |
|
706 | allowed_getitem=BUILTIN_GETITEM, | |
|
707 | allowed_getitem_external=SUPPORTED_EXTERNAL_GETITEM, | |
|
708 | allowed_getattr=BUILTIN_GETATTR, | |
|
709 | allowed_getattr_external={ | |
|
710 | # pandas Series/Frame implements custom `__getattr__` | |
|
711 | ("pandas", "DataFrame"), | |
|
712 | ("pandas", "Series"), | |
|
713 | }, | |
|
714 | allowed_operations=BUILTIN_OPERATIONS, | |
|
715 | allow_builtins_access=True, | |
|
716 | allow_locals_access=True, | |
|
717 | allow_globals_access=True, | |
|
718 | allowed_calls=ALLOWED_CALLS, | |
|
719 | ), | |
|
720 | "unsafe": EvaluationPolicy( | |
|
721 | allow_builtins_access=True, | |
|
722 | allow_locals_access=True, | |
|
723 | allow_globals_access=True, | |
|
724 | allow_attr_access=True, | |
|
725 | allow_item_access=True, | |
|
726 | allow_any_calls=True, | |
|
727 | allow_all_operations=True, | |
|
728 | ), | |
|
729 | } | |
|
730 | ||
|
731 | ||
|
732 | __all__ = [ | |
|
733 | "guarded_eval", | |
|
734 | "eval_node", | |
|
735 | "GuardRejection", | |
|
736 | "EvaluationContext", | |
|
737 | "_unbind_method", | |
|
738 | ] |
@@ -0,0 +1,570 @@
|
1 | from contextlib import contextmanager | |
|
2 | from typing import NamedTuple | |
|
3 | from functools import partial | |
|
4 | from IPython.core.guarded_eval import ( | |
|
5 | EvaluationContext, | |
|
6 | GuardRejection, | |
|
7 | guarded_eval, | |
|
8 | _unbind_method, | |
|
9 | ) | |
|
10 | from IPython.testing import decorators as dec | |
|
11 | import pytest | |
|
12 | ||
|
13 | ||
|
14 | def create_context(evaluation: str, **kwargs): | |
|
15 | return EvaluationContext(locals=kwargs, globals={}, evaluation=evaluation) | |
|
16 | ||
|
17 | ||
|
18 | forbidden = partial(create_context, "forbidden") | |
|
19 | minimal = partial(create_context, "minimal") | |
|
20 | limited = partial(create_context, "limited") | |
|
21 | unsafe = partial(create_context, "unsafe") | |
|
22 | dangerous = partial(create_context, "dangerous") | |
|
23 | ||
|
24 | LIMITED_OR_HIGHER = [limited, unsafe, dangerous] | |
|
25 | MINIMAL_OR_HIGHER = [minimal, *LIMITED_OR_HIGHER] | |
|
26 | ||
|
27 | ||
|
28 | @contextmanager | |
|
29 | def module_not_installed(module: str): | |
|
30 | import sys | |
|
31 | ||
|
32 | try: | |
|
33 | to_restore = sys.modules[module] | |
|
34 | del sys.modules[module] | |
|
35 | except KeyError: | |
|
36 | to_restore = None | |
|
37 | try: | |
|
38 | yield | |
|
39 | finally: | |
|
40 | sys.modules[module] = to_restore | |
|
41 | ||
|
42 | ||
|
43 | def test_external_not_installed(): | |
|
44 | """ | |
|
45 | Because the attribute check requires testing whether the object is of an | |
|
46 | allowed external type, this tests the logic when the external module is absent. | |
|
47 | """ | |
|
48 | ||
|
49 | class Custom: | |
|
50 | def __init__(self): | |
|
51 | self.test = 1 | |
|
52 | ||
|
53 | def __getattr__(self, key): | |
|
54 | return key | |
|
55 | ||
|
56 | with module_not_installed("pandas"): | |
|
57 | context = limited(x=Custom()) | |
|
58 | with pytest.raises(GuardRejection): | |
|
59 | guarded_eval("x.test", context) | |
|
60 | ||
|
61 | ||
|
62 | @dec.skip_without("pandas") | |
|
63 | def test_external_changed_api(monkeypatch): | |
|
64 | """Check that the execution rejects if external API changed paths""" | |
|
65 | import pandas as pd | |
|
66 | ||
|
67 | series = pd.Series([1], index=["a"]) | |
|
68 | ||
|
69 | with monkeypatch.context() as m: | |
|
70 | m.delattr(pd, "Series") | |
|
71 | context = limited(data=series) | |
|
72 | with pytest.raises(GuardRejection): | |
|
73 | guarded_eval("data.iloc[0]", context) | |
|
74 | ||
|
75 | ||
|
76 | @dec.skip_without("pandas") | |
|
77 | def test_pandas_series_iloc(): | |
|
78 | import pandas as pd | |
|
79 | ||
|
80 | series = pd.Series([1], index=["a"]) | |
|
81 | context = limited(data=series) | |
|
82 | assert guarded_eval("data.iloc[0]", context) == 1 | |
|
83 | ||
|
84 | ||
|
85 | def test_rejects_custom_properties(): | |
|
86 | class BadProperty: | |
|
87 | @property | |
|
88 | def iloc(self): | |
|
89 | return [None] | |
|
90 | ||
|
91 | series = BadProperty() | |
|
92 | context = limited(data=series) | |
|
93 | ||
|
94 | with pytest.raises(GuardRejection): | |
|
95 | guarded_eval("data.iloc[0]", context) | |
|
96 | ||
|
97 | ||
|
98 | @dec.skip_without("pandas") | |
|
99 | def test_accepts_non_overriden_properties(): | |
|
100 | import pandas as pd | |
|
101 | ||
|
102 | class GoodProperty(pd.Series): | |
|
103 | pass | |
|
104 | ||
|
105 | series = GoodProperty([1], index=["a"]) | |
|
106 | context = limited(data=series) | |
|
107 | ||
|
108 | assert guarded_eval("data.iloc[0]", context) == 1 | |
|
109 | ||
|
110 | ||
|
111 | @dec.skip_without("pandas") | |
|
112 | def test_pandas_series(): | |
|
113 | import pandas as pd | |
|
114 | ||
|
115 | context = limited(data=pd.Series([1], index=["a"])) | |
|
116 | assert guarded_eval('data["a"]', context) == 1 | |
|
117 | with pytest.raises(KeyError): | |
|
118 | guarded_eval('data["c"]', context) | |
|
119 | ||
|
120 | ||
|
121 | @dec.skip_without("pandas") | |
|
122 | def test_pandas_bad_series(): | |
|
123 | import pandas as pd | |
|
124 | ||
|
125 | class BadItemSeries(pd.Series): | |
|
126 | def __getitem__(self, key): | |
|
127 | return "CUSTOM_ITEM" | |
|
128 | ||
|
129 | class BadAttrSeries(pd.Series): | |
|
130 | def __getattr__(self, key): | |
|
131 | return "CUSTOM_ATTR" | |
|
132 | ||
|
133 | bad_series = BadItemSeries([1], index=["a"]) | |
|
134 | context = limited(data=bad_series) | |
|
135 | ||
|
136 | with pytest.raises(GuardRejection): | |
|
137 | guarded_eval('data["a"]', context) | |
|
138 | with pytest.raises(GuardRejection): | |
|
139 | guarded_eval('data["c"]', context) | |
|
140 | ||
|
141 | # note: here result is a bit unexpected because | |
|
142 | # pandas `__getattr__` calls `__getitem__`; | |
|
143 | # FIXME - special case to handle it? | |
|
144 | assert guarded_eval("data.a", context) == "CUSTOM_ITEM" | |
|
145 | ||
|
146 | context = unsafe(data=bad_series) | |
|
147 | assert guarded_eval('data["a"]', context) == "CUSTOM_ITEM" | |
|
148 | ||
|
149 | bad_attr_series = BadAttrSeries([1], index=["a"]) | |
|
150 | context = limited(data=bad_attr_series) | |
|
151 | assert guarded_eval('data["a"]', context) == 1 | |
|
152 | with pytest.raises(GuardRejection): | |
|
153 | guarded_eval("data.a", context) | |
|
154 | ||
|
155 | ||
|
156 | @dec.skip_without("pandas") | |
|
157 | def test_pandas_dataframe_loc(): | |
|
158 | import pandas as pd | |
|
159 | from pandas.testing import assert_series_equal | |
|
160 | ||
|
161 | data = pd.DataFrame([{"a": 1}]) | |
|
162 | context = limited(data=data) | |
|
163 | assert_series_equal(guarded_eval('data.loc[:, "a"]', context), data["a"]) | |
|
164 | ||
|
165 | ||
|
166 | def test_named_tuple(): | |
|
167 | class GoodNamedTuple(NamedTuple): | |
|
168 | a: str | |
|
169 | pass | |
|
170 | ||
|
171 | class BadNamedTuple(NamedTuple): | |
|
172 | a: str | |
|
173 | ||
|
174 | def __getitem__(self, key): | |
|
175 | return None | |
|
176 | ||
|
177 | good = GoodNamedTuple(a="x") | |
|
178 | bad = BadNamedTuple(a="x") | |
|
179 | ||
|
180 | context = limited(data=good) | |
|
181 | assert guarded_eval("data[0]", context) == "x" | |
|
182 | ||
|
183 | context = limited(data=bad) | |
|
184 | with pytest.raises(GuardRejection): | |
|
185 | guarded_eval("data[0]", context) | |
|
186 | ||
|
187 | ||
|
188 | def test_dict(): | |
|
189 | context = limited(data={"a": 1, "b": {"x": 2}, ("x", "y"): 3}) | |
|
190 | assert guarded_eval('data["a"]', context) == 1 | |
|
191 | assert guarded_eval('data["b"]', context) == {"x": 2} | |
|
192 | assert guarded_eval('data["b"]["x"]', context) == 2 | |
|
193 | assert guarded_eval('data["x", "y"]', context) == 3 | |
|
194 | ||
|
195 | assert guarded_eval("data.keys", context) | |
|
196 | ||
|
197 | ||
|
198 | def test_set(): | |
|
199 | context = limited(data={"a", "b"}) | |
|
200 | assert guarded_eval("data.difference", context) | |
|
201 | ||
|
202 | ||
|
203 | def test_list(): | |
|
204 | context = limited(data=[1, 2, 3]) | |
|
205 | assert guarded_eval("data[1]", context) == 2 | |
|
206 | assert guarded_eval("data.copy", context) | |
|
207 | ||
|
208 | ||
|
209 | def test_dict_literal(): | |
|
210 | context = limited() | |
|
211 | assert guarded_eval("{}", context) == {} | |
|
212 | assert guarded_eval('{"a": 1}', context) == {"a": 1} | |
|
213 | ||
|
214 | ||
|
215 | def test_list_literal(): | |
|
216 | context = limited() | |
|
217 | assert guarded_eval("[]", context) == [] | |
|
218 | assert guarded_eval('[1, "a"]', context) == [1, "a"] | |
|
219 | ||
|
220 | ||
|
221 | def test_set_literal(): | |
|
222 | context = limited() | |
|
223 | assert guarded_eval("set()", context) == set() | |
|
224 | assert guarded_eval('{"a"}', context) == {"a"} | |
|
225 | ||
|
226 | ||
|
227 | def test_evaluates_if_expression(): | |
|
228 | context = limited() | |
|
229 | assert guarded_eval("2 if True else 3", context) == 2 | |
|
230 | assert guarded_eval("4 if False else 5", context) == 5 | |
|
231 | ||
|
232 | ||
|
233 | def test_object(): | |
|
234 | obj = object() | |
|
235 | context = limited(obj=obj) | |
|
236 | assert guarded_eval("obj.__dir__", context) == obj.__dir__ | |
|
237 | ||
|
238 | ||
|
239 | @pytest.mark.parametrize( | |
|
240 | "code,expected", | |
|
241 | [ | |
|
242 | ["int.numerator", int.numerator], | |
|
243 | ["float.is_integer", float.is_integer], | |
|
244 | ["complex.real", complex.real], | |
|
245 | ], | |
|
246 | ) | |
|
247 | def test_number_attributes(code, expected): | |
|
248 | assert guarded_eval(code, limited()) == expected | |
|
249 | ||
|
250 | ||
|
251 | def test_method_descriptor(): | |
|
252 | context = limited() | |
|
253 | assert guarded_eval("list.copy.__name__", context) == "copy" | |
|
254 | ||
|
255 | ||
|
256 | @pytest.mark.parametrize( | |
|
257 | "data,good,bad,expected", | |
|
258 | [ | |
|
259 | [[1, 2, 3], "data.index(2)", "data.append(4)", 1], | |
|
260 | [{"a": 1}, "data.keys().isdisjoint({})", "data.update()", True], | |
|
261 | ], | |
|
262 | ) | |
|
263 | def test_evaluates_calls(data, good, bad, expected): | |
|
264 | context = limited(data=data) | |
|
265 | assert guarded_eval(good, context) == expected | |
|
266 | ||
|
267 | with pytest.raises(GuardRejection): | |
|
268 | guarded_eval(bad, context) | |
|
269 | ||
|
270 | ||
|
271 | @pytest.mark.parametrize( | |
|
272 | "code,expected", | |
|
273 | [ | |
|
274 | ["(1\n+\n1)", 2], | |
|
275 | ["list(range(10))[-1:]", [9]], | |
|
276 | ["list(range(20))[3:-2:3]", [3, 6, 9, 12, 15]], | |
|
277 | ], | |
|
278 | ) | |
|
279 | @pytest.mark.parametrize("context", LIMITED_OR_HIGHER) | |
|
280 | def test_evaluates_complex_cases(code, expected, context): | |
|
281 | assert guarded_eval(code, context()) == expected | |
|
282 | ||
|
283 | ||
|
284 | @pytest.mark.parametrize( | |
|
285 | "code,expected", | |
|
286 | [ | |
|
287 | ["1", 1], | |
|
288 | ["1.0", 1.0], | |
|
289 | ["0xdeedbeef", 0xDEEDBEEF], | |
|
290 | ["True", True], | |
|
291 | ["None", None], | |
|
292 | ["{}", {}], | |
|
293 | ["[]", []], | |
|
294 | ], | |
|
295 | ) | |
|
296 | @pytest.mark.parametrize("context", MINIMAL_OR_HIGHER) | |
|
297 | def test_evaluates_literals(code, expected, context): | |
|
298 | assert guarded_eval(code, context()) == expected | |
|
299 | ||
|
300 | ||
|
301 | @pytest.mark.parametrize( | |
|
302 | "code,expected", | |
|
303 | [ | |
|
304 | ["-5", -5], | |
|
305 | ["+5", +5], | |
|
306 | ["~5", -6], | |
|
307 | ], | |
|
308 | ) | |
|
309 | @pytest.mark.parametrize("context", LIMITED_OR_HIGHER) | |
|
310 | def test_evaluates_unary_operations(code, expected, context): | |
|
311 | assert guarded_eval(code, context()) == expected | |
|
312 | ||
|
313 | ||
|
314 | @pytest.mark.parametrize( | |
|
315 | "code,expected", | |
|
316 | [ | |
|
317 | ["1 + 1", 2], | |
|
318 | ["3 - 1", 2], | |
|
319 | ["2 * 3", 6], | |
|
320 | ["5 // 2", 2], | |
|
321 | ["5 / 2", 2.5], | |
|
322 | ["5**2", 25], | |
|
323 | ["2 >> 1", 1], | |
|
324 | ["2 << 1", 4], | |
|
325 | ["1 | 2", 3], | |
|
326 | ["1 & 1", 1], | |
|
327 | ["1 & 2", 0], | |
|
328 | ], | |
|
329 | ) | |
|
330 | @pytest.mark.parametrize("context", LIMITED_OR_HIGHER) | |
|
331 | def test_evaluates_binary_operations(code, expected, context): | |
|
332 | assert guarded_eval(code, context()) == expected | |
|
333 | ||
|
334 | ||
|
335 | @pytest.mark.parametrize( | |
|
336 | "code,expected", | |
|
337 | [ | |
|
338 | ["2 > 1", True], | |
|
339 | ["2 < 1", False], | |
|
340 | ["2 <= 1", False], | |
|
341 | ["2 <= 2", True], | |
|
342 | ["1 >= 2", False], | |
|
343 | ["2 >= 2", True], | |
|
344 | ["2 == 2", True], | |
|
345 | ["1 == 2", False], | |
|
346 | ["1 != 2", True], | |
|
347 | ["1 != 1", False], | |
|
348 | ["1 < 4 < 3", False], | |
|
349 | ["(1 < 4) < 3", True], | |
|
350 | ["4 > 3 > 2 > 1", True], | |
|
351 | ["4 > 3 > 2 > 9", False], | |
|
352 | ["1 < 2 < 3 < 4", True], | |
|
353 | ["9 < 2 < 3 < 4", False], | |
|
354 | ["1 < 2 > 1 > 0 > -1 < 1", True], | |
|
355 | ["1 in [1] in [[1]]", True], | |
|
356 | ["1 in [1] in [[2]]", False], | |
|
357 | ["1 in [1]", True], | |
|
358 | ["0 in [1]", False], | |
|
359 | ["1 not in [1]", False], | |
|
360 | ["0 not in [1]", True], | |
|
361 | ["True is True", True], | |
|
362 | ["False is False", True], | |
|
363 | ["True is False", False], | |
|
364 | ["True is not True", False], | |
|
365 | ["False is not True", True], | |
|
366 | ], | |
|
367 | ) | |
|
368 | @pytest.mark.parametrize("context", LIMITED_OR_HIGHER) | |
|
369 | def test_evaluates_comparisons(code, expected, context): | |
|
370 | assert guarded_eval(code, context()) == expected | |
|
371 | ||
|
372 | ||
|
373 | def test_guards_comparisons(): | |
|
374 | class GoodEq(int): | |
|
375 | pass | |
|
376 | ||
|
377 | class BadEq(int): | |
|
378 | def __eq__(self, other): | |
|
379 | assert False | |
|
380 | ||
|
381 | context = limited(bad=BadEq(1), good=GoodEq(1)) | |
|
382 | ||
|
383 | with pytest.raises(GuardRejection): | |
|
384 | guarded_eval("bad == 1", context) | |
|
385 | ||
|
386 | with pytest.raises(GuardRejection): | |
|
387 | guarded_eval("bad != 1", context) | |
|
388 | ||
|
389 | with pytest.raises(GuardRejection): | |
|
390 | guarded_eval("1 == bad", context) | |
|
391 | ||
|
392 | with pytest.raises(GuardRejection): | |
|
393 | guarded_eval("1 != bad", context) | |
|
394 | ||
|
395 | assert guarded_eval("good == 1", context) is True | |
|
396 | assert guarded_eval("good != 1", context) is False | |
|
397 | assert guarded_eval("1 == good", context) is True | |
|
398 | assert guarded_eval("1 != good", context) is False | |
|
399 | ||
|
400 | ||
|
401 | def test_guards_unary_operations(): | |
|
402 | class GoodOp(int): | |
|
403 | pass | |
|
404 | ||
|
405 | class BadOpInv(int): | |
|
406 | def __inv__(self, other): | |
|
407 | assert False | |
|
408 | ||
|
409 | class BadOpInverse(int): | |
|
410 | def __invert__(self): | |
|
411 | assert False | |
|
412 | ||
|
413 | context = limited(good=GoodOp(1), bad1=BadOpInv(1), bad2=BadOpInverse(1)) | |
|
414 | ||
|
415 | with pytest.raises(GuardRejection): | |
|
416 | guarded_eval("~bad1", context) | |
|
417 | ||
|
418 | with pytest.raises(GuardRejection): | |
|
419 | guarded_eval("~bad2", context) | |
|
420 | ||
|
421 | ||
|
422 | def test_guards_binary_operations(): | |
|
423 | class GoodOp(int): | |
|
424 | pass | |
|
425 | ||
|
426 | class BadOp(int): | |
|
427 | def __add__(self, other): | |
|
428 | assert False | |
|
429 | ||
|
430 | context = limited(good=GoodOp(1), bad=BadOp(1)) | |
|
431 | ||
|
432 | with pytest.raises(GuardRejection): | |
|
433 | guarded_eval("1 + bad", context) | |
|
434 | ||
|
435 | with pytest.raises(GuardRejection): | |
|
436 | guarded_eval("bad + 1", context) | |
|
437 | ||
|
438 | assert guarded_eval("good + 1", context) == 2 | |
|
439 | assert guarded_eval("1 + good", context) == 2 | |
|
440 | ||
|
441 | ||
|
442 | def test_guards_attributes(): | |
|
443 | class GoodAttr(float): | |
|
444 | pass | |
|
445 | ||
|
446 | class BadAttr1(float): | |
|
447 | def __getattr__(self, key): | |
|
448 | assert False | |
|
449 | ||
|
450 | class BadAttr2(float): | |
|
451 | def __getattribute__(self, key): | |
|
452 | assert False | |
|
453 | ||
|
454 | context = limited(good=GoodAttr(0.5), bad1=BadAttr1(0.5), bad2=BadAttr2(0.5)) | |
|
455 | ||
|
456 | with pytest.raises(GuardRejection): | |
|
457 | guarded_eval("bad1.as_integer_ratio", context) | |
|
458 | ||
|
459 | with pytest.raises(GuardRejection): | |
|
460 | guarded_eval("bad2.as_integer_ratio", context) | |
|
461 | ||
|
462 | assert guarded_eval("good.as_integer_ratio()", context) == (1, 2) | |
|
463 | ||
|
464 | ||
|
465 | @pytest.mark.parametrize("context", MINIMAL_OR_HIGHER) | |
|
466 | def test_access_builtins(context): | |
|
467 | assert guarded_eval("round", context()) == round | |
|
468 | ||
|
469 | ||
|
470 | def test_access_builtins_fails(): | |
|
471 | context = limited() | |
|
472 | with pytest.raises(NameError): | |
|
473 | guarded_eval("this_is_not_builtin", context) | |
|
474 | ||
|
475 | ||
|
476 | def test_rejects_forbidden(): | |
|
477 | context = forbidden() | |
|
478 | with pytest.raises(GuardRejection): | |
|
479 | guarded_eval("1", context) | |
|
480 | ||
|
481 | ||
|
482 | def test_guards_locals_and_globals(): | |
|
483 | context = EvaluationContext( | |
|
484 | locals={"local_a": "a"}, globals={"global_b": "b"}, evaluation="minimal" | |
|
485 | ) | |
|
486 | ||
|
487 | with pytest.raises(GuardRejection): | |
|
488 | guarded_eval("local_a", context) | |
|
489 | ||
|
490 | with pytest.raises(GuardRejection): | |
|
491 | guarded_eval("global_b", context) | |
|
492 | ||
|
493 | ||
|
494 | def test_access_locals_and_globals(): | |
|
495 | context = EvaluationContext( | |
|
496 | locals={"local_a": "a"}, globals={"global_b": "b"}, evaluation="limited" | |
|
497 | ) | |
|
498 | assert guarded_eval("local_a", context) == "a" | |
|
499 | assert guarded_eval("global_b", context) == "b" | |
|
500 | ||
|
501 | ||
|
502 | @pytest.mark.parametrize( | |
|
503 | "code", | |
|
504 | ["def func(): pass", "class C: pass", "x = 1", "x += 1", "del x", "import ast"], | |
|
505 | ) | |
|
506 | @pytest.mark.parametrize("context", [minimal(), limited(), unsafe()]) | |
|
507 | def test_rejects_side_effect_syntax(code, context): | |
|
508 | with pytest.raises(SyntaxError): | |
|
509 | guarded_eval(code, context) | |
|
510 | ||
|
511 | ||
|
512 | def test_subscript(): | |
|
513 | context = EvaluationContext( | |
|
514 | locals={}, globals={}, evaluation="limited", in_subscript=True | |
|
515 | ) | |
|
516 | empty_slice = slice(None, None, None) | |
|
517 | assert guarded_eval("", context) == tuple() | |
|
518 | assert guarded_eval(":", context) == empty_slice | |
|
519 | assert guarded_eval("1:2:3", context) == slice(1, 2, 3) | |
|
520 | assert guarded_eval(':, "a"', context) == (empty_slice, "a") | |
|
521 | ||
|
522 | ||
|
523 | def test_unbind_method(): | |
|
524 | class X(list): | |
|
525 | def index(self, k): | |
|
526 | return "CUSTOM" | |
|
527 | ||
|
528 | x = X() | |
|
529 | assert _unbind_method(x.index) is X.index | |
|
530 | assert _unbind_method([].index) is list.index | |
|
531 | assert _unbind_method(list.index) is None | |
|
532 | ||
|
533 | ||
|
534 | def test_assumption_instance_attr_do_not_matter(): | |
|
535 | """This is semi-specified in Python documentation. | |
|
536 | ||
|
537 | However, since the specification says 'not guaranteed | |
|
538 | to work' rather than 'is forbidden to work', future | |
|
539 | versions could invalidate this assumption. This test | |
|
540 | is meant to catch such a change if it ever happens. | |
|
541 | """ | |
|
542 | ||
|
543 | class T: | |
|
544 | def __getitem__(self, k): | |
|
545 | return "a" | |
|
546 | ||
|
547 | def __getattr__(self, k): | |
|
548 | return "a" | |
|
549 | ||
|
550 | def f(self): | |
|
551 | return "b" | |
|
552 | ||
|
553 | t = T() | |
|
554 | t.__getitem__ = f | |
|
555 | t.__getattr__ = f | |
|
556 | assert t[1] == "a" | |
|
557 | assert t.a == "a" | |
|
558 | ||
|
559 | ||
|
560 | def test_assumption_named_tuples_share_getitem(): | |
|
561 | """Check assumption on named tuples sharing __getitem__""" | |
|
562 | from typing import NamedTuple | |
|
563 | ||
|
564 | class A(NamedTuple): | |
|
565 | pass | |
|
566 | ||
|
567 | class B(NamedTuple): | |
|
568 | pass | |
|
569 | ||
|
570 | assert A.__getitem__ == B.__getitem__ |
|
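The tests above exercise the allow-lists shipped with the `limited` policy; the sketch below shows how, in principle, those allow-lists could be extended for a user-defined container. `MyTable` and the registration step are illustrative assumptions, not an API documented by this change.

```python
from IPython.core.guarded_eval import (
    EVALUATION_POLICIES,
    EvaluationContext,
    GuardRejection,
    guarded_eval,
)


class MyTable:
    """Hypothetical read-only container, used only for this illustration."""

    def __init__(self, data):
        self._data = dict(data)

    def __getitem__(self, key):
        return self._data[key]


context = EvaluationContext(
    locals={"t": MyTable({"a": 1})}, globals={}, evaluation="limited"
)

# Not allow-listed yet: the limited policy rejects the subscript.
try:
    guarded_eval('t["a"]', context)
except GuardRejection:
    pass

# Rebind (rather than mutate) the allow-list so the shared BUILTIN_GETITEM set
# stays untouched; this is a sketch, not a documented extension mechanism.
policy = EVALUATION_POLICIES["limited"]
policy.allowed_getitem = {*policy.allowed_getitem, MyTable}

assert guarded_eval('t["a"]', context) == 1
```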
1 | NO CONTENT: new file 100644 | |
@@ -1,37 +1,39 @@
|
1 | 1 | name: Run MyPy |
|
2 | 2 | |
|
3 | 3 | on: |
|
4 | 4 | push: |
|
5 | 5 | branches: [ main, 7.x] |
|
6 | 6 | pull_request: |
|
7 | 7 | branches: [ main, 7.x] |
|
8 | 8 | |
|
9 | 9 | permissions: |
|
10 | 10 | contents: read |
|
11 | 11 | |
|
12 | 12 | jobs: |
|
13 | 13 | build: |
|
14 | 14 | |
|
15 | 15 | runs-on: ubuntu-latest |
|
16 | 16 | strategy: |
|
17 | 17 | matrix: |
|
18 |
python-version: [ |
|
|
18 | python-version: ["3.x"] | |
|
19 | 19 | |
|
20 | 20 | steps: |
|
21 | 21 | - uses: actions/checkout@v3 |
|
22 | 22 | - name: Set up Python ${{ matrix.python-version }} |
|
23 | 23 | uses: actions/setup-python@v4 |
|
24 | 24 | with: |
|
25 | 25 | python-version: ${{ matrix.python-version }} |
|
26 | 26 | - name: Install dependencies |
|
27 | 27 | run: | |
|
28 | 28 | python -m pip install --upgrade pip |
|
29 | 29 | pip install mypy pyflakes flake8 |
|
30 | 30 | - name: Lint with mypy |
|
31 | 31 | run: | |
|
32 | 32 | mypy -p IPython.terminal |
|
33 | 33 | mypy -p IPython.core.magics |
|
34 | mypy -p IPython.core.guarded_eval | |
|
35 | mypy -p IPython.core.completer | |
|
34 | 36 | - name: Lint with pyflakes |
|
35 | 37 | run: | |
|
36 | 38 | flake8 IPython/core/magics/script.py |
|
37 | 39 | flake8 IPython/core/magics/packaging.py |
@@ -1,83 +1,83 @@
|
1 | 1 | name: Run tests |
|
2 | 2 | |
|
3 | 3 | on: |
|
4 | 4 | push: |
|
5 | 5 | branches: |
|
6 | 6 | - main |
|
7 | 7 | - '*.x' |
|
8 | 8 | pull_request: |
|
9 | 9 | # Run weekly on Monday at 1:23 UTC |
|
10 | 10 | schedule: |
|
11 | 11 | - cron: '23 1 * * 1' |
|
12 | 12 | workflow_dispatch: |
|
13 | 13 | |
|
14 | 14 | |
|
15 | 15 | jobs: |
|
16 | 16 | test: |
|
17 | 17 | runs-on: ${{ matrix.os }} |
|
18 | 18 | strategy: |
|
19 | 19 | fail-fast: false |
|
20 | 20 | matrix: |
|
21 | 21 | os: [ubuntu-latest, windows-latest] |
|
22 | python-version: ["3.8", "3.9", "3.10"] | |
|
22 | python-version: ["3.8", "3.9", "3.10", "3.11"] | |
|
23 | 23 | deps: [test_extra] |
|
24 | 24 | # Test all on ubuntu, test ends on macos |
|
25 | 25 | include: |
|
26 | 26 | - os: macos-latest |
|
27 | 27 | python-version: "3.8" |
|
28 | 28 | deps: test_extra |
|
29 | 29 | - os: macos-latest |
|
30 |
python-version: "3.1 |
|
|
30 | python-version: "3.11" | |
|
31 | 31 | deps: test_extra |
|
32 | 32 | # Tests minimal dependencies set |
|
33 | 33 | - os: ubuntu-latest |
|
34 |
python-version: "3.1 |
|
|
34 | python-version: "3.11" | |
|
35 | 35 | deps: test |
|
36 | 36 | # Tests latest development Python version |
|
37 | 37 | - os: ubuntu-latest |
|
38 |
python-version: "3.1 |
|
|
38 | python-version: "3.12-dev" | |
|
39 | 39 | deps: test |
|
40 | 40 | # Installing optional dependencies stuff takes ages on PyPy |
|
41 | 41 | - os: ubuntu-latest |
|
42 | 42 | python-version: "pypy-3.8" |
|
43 | 43 | deps: test |
|
44 | 44 | - os: windows-latest |
|
45 | 45 | python-version: "pypy-3.8" |
|
46 | 46 | deps: test |
|
47 | 47 | - os: macos-latest |
|
48 | 48 | python-version: "pypy-3.8" |
|
49 | 49 | deps: test |
|
50 | 50 | |
|
51 | 51 | steps: |
|
52 | 52 | - uses: actions/checkout@v3 |
|
53 | 53 | - name: Set up Python ${{ matrix.python-version }} |
|
54 | 54 | uses: actions/setup-python@v4 |
|
55 | 55 | with: |
|
56 | 56 | python-version: ${{ matrix.python-version }} |
|
57 | 57 | cache: pip |
|
58 | 58 | - name: Install latex |
|
59 | 59 | if: runner.os == 'Linux' && matrix.deps == 'test_extra' |
|
60 | 60 | run: echo "disable latex for now, issues in mirros" #sudo apt-get -yq -o Acquire::Retries=3 --no-install-suggests --no-install-recommends install texlive dvipng |
|
61 | 61 | - name: Install and update Python dependencies |
|
62 | 62 | run: | |
|
63 | 63 | python -m pip install --upgrade pip setuptools wheel build |
|
64 | 64 | python -m pip install --upgrade -e .[${{ matrix.deps }}] |
|
65 | 65 | python -m pip install --upgrade check-manifest pytest-cov |
|
66 | 66 | - name: Try building with Python build |
|
67 | 67 | if: runner.os != 'Windows' # setup.py does not support sdist on Windows |
|
68 | 68 | run: | |
|
69 | 69 | python -m build |
|
70 | 70 | shasum -a 256 dist/* |
|
71 | 71 | - name: Check manifest |
|
72 | 72 | if: runner.os != 'Windows' # setup.py does not support sdist on Windows |
|
73 | 73 | run: check-manifest |
|
74 | 74 | - name: pytest |
|
75 | 75 | env: |
|
76 | 76 | COLUMNS: 120 |
|
77 | 77 | run: | |
|
78 | 78 | pytest --color=yes -raXxs ${{ startsWith(matrix.python-version, 'pypy') && ' ' || '--cov --cov-report=xml' }} |
|
79 | 79 | - name: Upload coverage to Codecov |
|
80 | 80 | uses: codecov/codecov-action@v3 |
|
81 | 81 | with: |
|
82 | 82 | name: Test |
|
83 | 83 | files: /home/runner/work/ipython/ipython/coverage.xml |
@@ -1,155 +1,156 @@
|
1 | # PYTHON_ARGCOMPLETE_OK | |
|
1 | 2 | """ |
|
2 | 3 | IPython: tools for interactive and parallel computing in Python. |
|
3 | 4 | |
|
4 | 5 | https://ipython.org |
|
5 | 6 | """ |
|
6 | 7 | #----------------------------------------------------------------------------- |
|
7 | 8 | # Copyright (c) 2008-2011, IPython Development Team. |
|
8 | 9 | # Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu> |
|
9 | 10 | # Copyright (c) 2001, Janko Hauser <jhauser@zscout.de> |
|
10 | 11 | # Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu> |
|
11 | 12 | # |
|
12 | 13 | # Distributed under the terms of the Modified BSD License. |
|
13 | 14 | # |
|
14 | 15 | # The full license is in the file COPYING.txt, distributed with this software. |
|
15 | 16 | #----------------------------------------------------------------------------- |
|
16 | 17 | |
|
17 | 18 | #----------------------------------------------------------------------------- |
|
18 | 19 | # Imports |
|
19 | 20 | #----------------------------------------------------------------------------- |
|
20 | 21 | |
|
21 | 22 | import sys |
|
22 | 23 | |
|
23 | 24 | #----------------------------------------------------------------------------- |
|
24 | 25 | # Setup everything |
|
25 | 26 | #----------------------------------------------------------------------------- |
|
26 | 27 | |
|
27 | 28 | # Don't forget to also update setup.py when this changes! |
|
28 | 29 | if sys.version_info < (3, 8): |
|
29 | 30 | raise ImportError( |
|
30 | 31 | """ |
|
31 | 32 | IPython 8+ supports Python 3.8 and above, following NEP 29. |
|
32 | 33 | When using Python 2.7, please install IPython 5.x LTS Long Term Support version. |
|
33 | 34 | Python 3.3 and 3.4 were supported up to IPython 6.x. |
|
34 | 35 | Python 3.5 was supported with IPython 7.0 to 7.9. |
|
35 | 36 | Python 3.6 was supported with IPython up to 7.16. |
|
36 | 37 | Python 3.7 was still supported with the 7.x branch. |
|
37 | 38 | |
|
38 | 39 | See IPython `README.rst` file for more information: |
|
39 | 40 | |
|
40 | 41 | https://github.com/ipython/ipython/blob/main/README.rst |
|
41 | 42 | |
|
42 | 43 | """ |
|
43 | 44 | ) |
|
44 | 45 | |
|
45 | 46 | #----------------------------------------------------------------------------- |
|
46 | 47 | # Setup the top level names |
|
47 | 48 | #----------------------------------------------------------------------------- |
|
48 | 49 | |
|
49 | 50 | from .core.getipython import get_ipython |
|
50 | 51 | from .core import release |
|
51 | 52 | from .core.application import Application |
|
52 | 53 | from .terminal.embed import embed |
|
53 | 54 | |
|
54 | 55 | from .core.interactiveshell import InteractiveShell |
|
55 | 56 | from .utils.sysinfo import sys_info |
|
56 | 57 | from .utils.frame import extract_module_locals |
|
57 | 58 | |
|
58 | 59 | # Release data |
|
59 | 60 | __author__ = '%s <%s>' % (release.author, release.author_email) |
|
60 | 61 | __license__ = release.license |
|
61 | 62 | __version__ = release.version |
|
62 | 63 | version_info = release.version_info |
|
63 | 64 | # list of CVEs that should have been patched in this release. |
|
64 | 65 | # this is informational and should not be relied upon. |
|
65 | 66 | __patched_cves__ = {"CVE-2022-21699"} |
|
66 | 67 | |
|
67 | 68 | |
|
68 | 69 | def embed_kernel(module=None, local_ns=None, **kwargs): |
|
69 | 70 | """Embed and start an IPython kernel in a given scope. |
|
70 | 71 | |
|
71 | 72 | If you don't want the kernel to initialize the namespace |
|
72 | 73 | from the scope of the surrounding function, |
|
73 | 74 | and/or you want to load full IPython configuration, |
|
74 | 75 | you probably want `IPython.start_kernel()` instead. |
|
75 | 76 | |
|
76 | 77 | Parameters |
|
77 | 78 | ---------- |
|
78 | 79 | module : types.ModuleType, optional |
|
79 | 80 | The module to load into IPython globals (default: caller) |
|
80 | 81 | local_ns : dict, optional |
|
81 | 82 | The namespace to load into IPython user namespace (default: caller) |
|
82 | 83 | **kwargs : various, optional |
|
83 | 84 | Further keyword args are relayed to the IPKernelApp constructor, |
|
84 | 85 | allowing configuration of the Kernel. Will only have an effect |
|
85 | 86 | on the first embed_kernel call for a given process. |
|
86 | 87 | """ |
|
87 | 88 | |
|
88 | 89 | (caller_module, caller_locals) = extract_module_locals(1) |
|
89 | 90 | if module is None: |
|
90 | 91 | module = caller_module |
|
91 | 92 | if local_ns is None: |
|
92 | 93 | local_ns = caller_locals |
|
93 | 94 | |
|
94 | 95 | # Only import .zmq when we really need it |
|
95 | 96 | from ipykernel.embed import embed_kernel as real_embed_kernel |
|
96 | 97 | real_embed_kernel(module=module, local_ns=local_ns, **kwargs) |
|
97 | 98 | |
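The docstring above describes the parameters of ``embed_kernel`` but not a call site. A minimal sketch of typical use follows; the surrounding function and its variables are illustrative, and ipykernel must be installed for the deferred import to succeed:

    import IPython

    def compute_and_inspect():
        data = list(range(10))        # illustrative local state
        total = sum(data)
        # Start a kernel whose user namespace is this function's locals;
        # attach from another terminal with `jupyter console --existing`.
        IPython.embed_kernel(local_ns=locals())
        return total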
|
98 | 99 | def start_ipython(argv=None, **kwargs): |
|
99 | 100 | """Launch a normal IPython instance (as opposed to embedded) |
|
100 | 101 | |
|
101 | 102 | `IPython.embed()` puts a shell in a particular calling scope, |
|
102 | 103 | such as a function or method for debugging purposes, |
|
103 | 104 | which is often not desirable. |
|
104 | 105 | |
|
105 | 106 | `start_ipython()` does full, regular IPython initialization, |
|
106 | 107 | including loading startup files, configuration, etc. |
|
107 | 108 | much of which is skipped by `embed()`. |
|
108 | 109 | |
|
109 | 110 | This is a public API method, and will survive implementation changes. |
|
110 | 111 | |
|
111 | 112 | Parameters |
|
112 | 113 | ---------- |
|
113 | 114 | argv : list or None, optional |
|
114 | 115 | If unspecified or None, IPython will parse command-line options from sys.argv. |
|
115 | 116 | To prevent any command-line parsing, pass an empty list: `argv=[]`. |
|
116 | 117 | user_ns : dict, optional |
|
117 | 118 | specify this dictionary to initialize the IPython user namespace with particular values. |
|
118 | 119 | **kwargs : various, optional |
|
119 | 120 | Any other kwargs will be passed to the Application constructor, |
|
120 | 121 | such as `config`. |
|
121 | 122 | """ |
|
122 | 123 | from IPython.terminal.ipapp import launch_new_instance |
|
123 | 124 | return launch_new_instance(argv=argv, **kwargs) |
|
124 | 125 | |
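As a quick illustration of the parameters documented above, here is a sketch of launching IPython programmatically without touching ``sys.argv``; the specific ``Config`` value set here is only an example:

    import IPython
    from traitlets.config import Config

    c = Config()
    c.InteractiveShell.colors = "NoColor"   # illustrative configuration value

    # argv=[] disables command-line parsing; user_ns seeds the interactive namespace.
    IPython.start_ipython(argv=[], config=c, user_ns={"answer": 42})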
|
125 | 126 | def start_kernel(argv=None, **kwargs): |
|
126 | 127 | """Launch a normal IPython kernel instance (as opposed to embedded) |
|
127 | 128 | |
|
128 | 129 | `IPython.embed_kernel()` puts a shell in a particular calling scope, |
|
129 | 130 | such as a function or method for debugging purposes, |
|
130 | 131 | which is often not desirable. |
|
131 | 132 | |
|
132 | 133 | `start_kernel()` does full, regular IPython initialization, |
|
133 | 134 | including loading startup files, configuration, etc. |
|
134 | 135 | much of which is skipped by `embed()`. |
|
135 | 136 | |
|
136 | 137 | Parameters |
|
137 | 138 | ---------- |
|
138 | 139 | argv : list or None, optional |
|
139 | 140 | If unspecified or None, IPython will parse command-line options from sys.argv. |
|
140 | 141 | To prevent any command-line parsing, pass an empty list: `argv=[]`. |
|
141 | 142 | user_ns : dict, optional |
|
142 | 143 | specify this dictionary to initialize the IPython user namespace with particular values. |
|
143 | 144 | **kwargs : various, optional |
|
144 | 145 | Any other kwargs will be passed to the Application constructor, |
|
145 | 146 | such as `config`. |
|
146 | 147 | """ |
|
147 | 148 | import warnings |
|
148 | 149 | |
|
149 | 150 | warnings.warn( |
|
150 | 151 | "start_kernel is deprecated since IPython 8.0, use from `ipykernel.kernelapp.launch_new_instance`", |
|
151 | 152 | DeprecationWarning, |
|
152 | 153 | stacklevel=2, |
|
153 | 154 | ) |
|
154 | 155 | from ipykernel.kernelapp import launch_new_instance |
|
155 | 156 | return launch_new_instance(argv=argv, **kwargs) |
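Because ``start_kernel`` now only warns and forwards to ipykernel, a sketch of the replacement named in the deprecation message (requires ipykernel to be installed):

    from ipykernel.kernelapp import launch_new_instance

    # Equivalent of the deprecated IPython.start_kernel(argv=[])
    launch_new_instance(argv=[])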
@@ -1,14 +1,15 b'' | |||
|
1 | # PYTHON_ARGCOMPLETE_OK | |
|
1 | 2 | # encoding: utf-8 |
|
2 | 3 | """Terminal-based IPython entry point. |
|
3 | 4 | """ |
|
4 | 5 | #----------------------------------------------------------------------------- |
|
5 | 6 | # Copyright (c) 2012, IPython Development Team. |
|
6 | 7 | # |
|
7 | 8 | # Distributed under the terms of the Modified BSD License. |
|
8 | 9 | # |
|
9 | 10 | # The full license is in the file COPYING.txt, distributed with this software. |
|
10 | 11 | #----------------------------------------------------------------------------- |
|
11 | 12 | |
|
12 | 13 | from IPython import start_ipython |
|
13 | 14 | |
|
14 | 15 | start_ipython() |
@@ -1,489 +1,488 b'' | |||
|
1 | 1 | # encoding: utf-8 |
|
2 | 2 | """ |
|
3 | 3 | An application for IPython. |
|
4 | 4 | |
|
5 | 5 | All top-level applications should use the classes in this module for |
|
6 | 6 | handling configuration and creating configurables. |
|
7 | 7 | |
|
8 | 8 | The job of an :class:`Application` is to create the master configuration |
|
9 | 9 | object and then create the configurable objects, passing the config to them. |
|
10 | 10 | """ |
|
11 | 11 | |
|
12 | 12 | # Copyright (c) IPython Development Team. |
|
13 | 13 | # Distributed under the terms of the Modified BSD License. |
|
14 | 14 | |
|
15 | 15 | import atexit |
|
16 | 16 | from copy import deepcopy |
|
17 | 17 | import logging |
|
18 | 18 | import os |
|
19 | 19 | import shutil |
|
20 | 20 | import sys |
|
21 | 21 | |
|
22 | 22 | from pathlib import Path |
|
23 | 23 | |
|
24 | 24 | from traitlets.config.application import Application, catch_config_error |
|
25 | 25 | from traitlets.config.loader import ConfigFileNotFound, PyFileConfigLoader |
|
26 | 26 | from IPython.core import release, crashhandler |
|
27 | 27 | from IPython.core.profiledir import ProfileDir, ProfileDirError |
|
28 | 28 | from IPython.paths import get_ipython_dir, get_ipython_package_dir |
|
29 | 29 | from IPython.utils.path import ensure_dir_exists |
|
30 | 30 | from traitlets import ( |
|
31 | 31 | List, Unicode, Type, Bool, Set, Instance, Undefined, |
|
32 | 32 | default, observe, |
|
33 | 33 | ) |
|
34 | 34 | |
|
35 | 35 | if os.name == "nt": |
|
36 | 36 | programdata = os.environ.get("PROGRAMDATA", None) |
|
37 | 37 | if programdata is not None: |
|
38 | 38 | SYSTEM_CONFIG_DIRS = [str(Path(programdata) / "ipython")] |
|
39 | 39 | else: # PROGRAMDATA is not defined by default on XP. |
|
40 | 40 | SYSTEM_CONFIG_DIRS = [] |
|
41 | 41 | else: |
|
42 | 42 | SYSTEM_CONFIG_DIRS = [ |
|
43 | 43 | "/usr/local/etc/ipython", |
|
44 | 44 | "/etc/ipython", |
|
45 | 45 | ] |
|
46 | 46 | |
|
47 | 47 | |
|
48 | 48 | ENV_CONFIG_DIRS = [] |
|
49 | 49 | _env_config_dir = os.path.join(sys.prefix, 'etc', 'ipython') |
|
50 | 50 | if _env_config_dir not in SYSTEM_CONFIG_DIRS: |
|
51 | 51 | # only add ENV_CONFIG if sys.prefix is not already included |
|
52 | 52 | ENV_CONFIG_DIRS.append(_env_config_dir) |
|
53 | 53 | |
|
54 | 54 | |
|
55 | 55 | _envvar = os.environ.get('IPYTHON_SUPPRESS_CONFIG_ERRORS') |
|
56 | 56 | if _envvar in {None, ''}: |
|
57 | 57 | IPYTHON_SUPPRESS_CONFIG_ERRORS = None |
|
58 | 58 | else: |
|
59 | 59 | if _envvar.lower() in {'1','true'}: |
|
60 | 60 | IPYTHON_SUPPRESS_CONFIG_ERRORS = True |
|
61 | 61 | elif _envvar.lower() in {'0','false'} : |
|
62 | 62 | IPYTHON_SUPPRESS_CONFIG_ERRORS = False |
|
63 | 63 | else: |
|
64 | 64 | sys.exit("Unsupported value for environment variable: 'IPYTHON_SUPPRESS_CONFIG_ERRORS' is set to '%s' which is none of {'0', '1', 'false', 'true', ''}."% _envvar ) |
|
65 | 65 | |
|
66 | 66 | # aliases and flags |
|
67 | 67 | |
|
68 | 68 | base_aliases = {} |
|
69 | 69 | if isinstance(Application.aliases, dict): |
|
70 | 70 | # traitlets 5 |
|
71 | 71 | base_aliases.update(Application.aliases) |
|
72 | 72 | base_aliases.update( |
|
73 | 73 | { |
|
74 | 74 | "profile-dir": "ProfileDir.location", |
|
75 | 75 | "profile": "BaseIPythonApplication.profile", |
|
76 | 76 | "ipython-dir": "BaseIPythonApplication.ipython_dir", |
|
77 | 77 | "log-level": "Application.log_level", |
|
78 | 78 | "config": "BaseIPythonApplication.extra_config_file", |
|
79 | 79 | } |
|
80 | 80 | ) |
|
81 | 81 | |
|
82 | 82 | base_flags = dict() |
|
83 | 83 | if isinstance(Application.flags, dict): |
|
84 | 84 | # traitlets 5 |
|
85 | 85 | base_flags.update(Application.flags) |
|
86 | 86 | base_flags.update( |
|
87 | 87 | dict( |
|
88 | 88 | debug=( |
|
89 | 89 | {"Application": {"log_level": logging.DEBUG}}, |
|
90 | 90 | "set log level to logging.DEBUG (maximize logging output)", |
|
91 | 91 | ), |
|
92 | 92 | quiet=( |
|
93 | 93 | {"Application": {"log_level": logging.CRITICAL}}, |
|
94 | 94 | "set log level to logging.CRITICAL (minimize logging output)", |
|
95 | 95 | ), |
|
96 | 96 | init=( |
|
97 | 97 | { |
|
98 | 98 | "BaseIPythonApplication": { |
|
99 | 99 | "copy_config_files": True, |
|
100 | 100 | "auto_create": True, |
|
101 | 101 | } |
|
102 | 102 | }, |
|
103 | 103 | """Initialize profile with default config files. This is equivalent |
|
104 | 104 | to running `ipython profile create <profile>` prior to startup. |
|
105 | 105 | """, |
|
106 | 106 | ), |
|
107 | 107 | ) |
|
108 | 108 | ) |
|
109 | 109 | |
|
110 | 110 | |
|
111 | 111 | class ProfileAwareConfigLoader(PyFileConfigLoader): |
|
112 | 112 | """A Python file config loader that is aware of IPython profiles.""" |
|
113 | 113 | def load_subconfig(self, fname, path=None, profile=None): |
|
114 | 114 | if profile is not None: |
|
115 | 115 | try: |
|
116 | 116 | profile_dir = ProfileDir.find_profile_dir_by_name( |
|
117 | 117 | get_ipython_dir(), |
|
118 | 118 | profile, |
|
119 | 119 | ) |
|
120 | 120 | except ProfileDirError: |
|
121 | 121 | return |
|
122 | 122 | path = profile_dir.location |
|
123 | 123 | return super(ProfileAwareConfigLoader, self).load_subconfig(fname, path=path) |
|
124 | 124 | |
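The comment in ``BaseIPythonApplication`` below notes that this loader enables ``load_subconfig('cfg.py', profile='name')`` inside config files. A sketch of what that might look like from a profile's ``ipython_config.py``; the ``base`` profile name and the trait being overridden are illustrative:

    # ~/.ipython/profile_something/ipython_config.py
    load_subconfig("ipython_config.py", profile="base")   # reuse another profile's config

    c = get_config()
    c.InteractiveShell.automagic = False                   # then override selected options locally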
|
125 | 125 | class BaseIPythonApplication(Application): |
|
126 | ||
|
127 | name = u'ipython' | |
|
128 | description = Unicode(u'IPython: an enhanced interactive Python shell.') | |
|
126 | name = "ipython" | |
|
127 | description = "IPython: an enhanced interactive Python shell." | |
|
129 | 128 | version = Unicode(release.version) |
|
130 | 129 | |
|
131 | 130 | aliases = base_aliases |
|
132 | 131 | flags = base_flags |
|
133 | 132 | classes = List([ProfileDir]) |
|
134 | 133 | |
|
135 | 134 | # enable `load_subconfig('cfg.py', profile='name')` |
|
136 | 135 | python_config_loader_class = ProfileAwareConfigLoader |
|
137 | 136 | |
|
138 | 137 | # Track whether the config_file has changed, |
|
139 | 138 | # because some logic happens only if we aren't using the default. |
|
140 | 139 | config_file_specified = Set() |
|
141 | 140 | |
|
142 | 141 | config_file_name = Unicode() |
|
143 | 142 | @default('config_file_name') |
|
144 | 143 | def _config_file_name_default(self): |
|
145 | 144 | return self.name.replace('-','_') + u'_config.py' |
|
146 | 145 | @observe('config_file_name') |
|
147 | 146 | def _config_file_name_changed(self, change): |
|
148 | 147 | if change['new'] != change['old']: |
|
149 | 148 | self.config_file_specified.add(change['new']) |
|
150 | 149 | |
|
151 | 150 | # The directory that contains IPython's builtin profiles. |
|
152 | 151 | builtin_profile_dir = Unicode( |
|
153 | 152 | os.path.join(get_ipython_package_dir(), u'config', u'profile', u'default') |
|
154 | 153 | ) |
|
155 | 154 | |
|
156 | 155 | config_file_paths = List(Unicode()) |
|
157 | 156 | @default('config_file_paths') |
|
158 | 157 | def _config_file_paths_default(self): |
|
159 | 158 | return [] |
|
160 | 159 | |
|
161 | 160 | extra_config_file = Unicode( |
|
162 | 161 | help="""Path to an extra config file to load. |
|
163 | 162 | |
|
164 | 163 | If specified, load this config file in addition to any other IPython config. |
|
165 | 164 | """).tag(config=True) |
|
166 | 165 | @observe('extra_config_file') |
|
167 | 166 | def _extra_config_file_changed(self, change): |
|
168 | 167 | old = change['old'] |
|
169 | 168 | new = change['new'] |
|
170 | 169 | try: |
|
171 | 170 | self.config_files.remove(old) |
|
172 | 171 | except ValueError: |
|
173 | 172 | pass |
|
174 | 173 | self.config_file_specified.add(new) |
|
175 | 174 | self.config_files.append(new) |
|
176 | 175 | |
|
177 | 176 | profile = Unicode(u'default', |
|
178 | 177 | help="""The IPython profile to use.""" |
|
179 | 178 | ).tag(config=True) |
|
180 | 179 | |
|
181 | 180 | @observe('profile') |
|
182 | 181 | def _profile_changed(self, change): |
|
183 | 182 | self.builtin_profile_dir = os.path.join( |
|
184 | 183 | get_ipython_package_dir(), u'config', u'profile', change['new'] |
|
185 | 184 | ) |
|
186 | 185 | |
|
187 | 186 | add_ipython_dir_to_sys_path = Bool( |
|
188 | 187 | False, |
|
189 | 188 | """Should the IPython profile directory be added to sys path ? |
|
190 | 189 | |
|
191 | 190 | This option was non-existing before IPython 8.0, and ipython_dir was added to |
|
192 | 191 | sys path to allow import of extensions present there. This was historical |
|
193 | 192 | baggage from when pip did not exist. This now default to false, |
|
194 | 193 | but can be set to true for legacy reasons. |
|
195 | 194 | """, |
|
196 | 195 | ).tag(config=True) |
|
197 | 196 | |
|
198 | 197 | ipython_dir = Unicode( |
|
199 | 198 | help=""" |
|
200 | 199 | The name of the IPython directory. This directory is used for logging |
|
201 | 200 | configuration (through profiles), history storage, etc. The default |
|
202 | 201 | is usually $HOME/.ipython. This option can also be specified through |
|
203 | 202 | the environment variable IPYTHONDIR. |
|
204 | 203 | """ |
|
205 | 204 | ).tag(config=True) |
|
206 | 205 | @default('ipython_dir') |
|
207 | 206 | def _ipython_dir_default(self): |
|
208 | 207 | d = get_ipython_dir() |
|
209 | 208 | self._ipython_dir_changed({ |
|
210 | 209 | 'name': 'ipython_dir', |
|
211 | 210 | 'old': d, |
|
212 | 211 | 'new': d, |
|
213 | 212 | }) |
|
214 | 213 | return d |
|
215 | 214 | |
|
216 | 215 | _in_init_profile_dir = False |
|
217 | 216 | profile_dir = Instance(ProfileDir, allow_none=True) |
|
218 | 217 | @default('profile_dir') |
|
219 | 218 | def _profile_dir_default(self): |
|
220 | 219 | # avoid recursion |
|
221 | 220 | if self._in_init_profile_dir: |
|
222 | 221 | return |
|
223 | 222 | # profile_dir requested early, force initialization |
|
224 | 223 | self.init_profile_dir() |
|
225 | 224 | return self.profile_dir |
|
226 | 225 | |
|
227 | 226 | overwrite = Bool(False, |
|
228 | 227 | help="""Whether to overwrite existing config files when copying""" |
|
229 | 228 | ).tag(config=True) |
|
230 | 229 | auto_create = Bool(False, |
|
231 | 230 | help="""Whether to create profile dir if it doesn't exist""" |
|
232 | 231 | ).tag(config=True) |
|
233 | 232 | |
|
234 | 233 | config_files = List(Unicode()) |
|
235 | 234 | @default('config_files') |
|
236 | 235 | def _config_files_default(self): |
|
237 | 236 | return [self.config_file_name] |
|
238 | 237 | |
|
239 | 238 | copy_config_files = Bool(False, |
|
240 | 239 | help="""Whether to install the default config files into the profile dir. |
|
241 | 240 | If a new profile is being created, and IPython contains config files for that |
|
242 | 241 | profile, then they will be staged into the new directory. Otherwise, |
|
243 | 242 | default config files will be automatically generated. |
|
244 | 243 | """).tag(config=True) |
|
245 | 244 | |
|
246 | 245 | verbose_crash = Bool(False, |
|
247 | 246 | help="""Create a massive crash report when IPython encounters what may be an |
|
248 | 247 | internal error. The default is to append a short message to the |
|
249 | 248 | usual traceback""").tag(config=True) |
|
250 | 249 | |
|
251 | 250 | # The class to use as the crash handler. |
|
252 | 251 | crash_handler_class = Type(crashhandler.CrashHandler) |
|
253 | 252 | |
|
254 | 253 | @catch_config_error |
|
255 | 254 | def __init__(self, **kwargs): |
|
256 | 255 | super(BaseIPythonApplication, self).__init__(**kwargs) |
|
257 | 256 | # ensure current working directory exists |
|
258 | 257 | try: |
|
259 | 258 | os.getcwd() |
|
260 | 259 | except: |
|
261 | 260 | # exit if cwd doesn't exist |
|
262 | 261 | self.log.error("Current working directory doesn't exist.") |
|
263 | 262 | self.exit(1) |
|
264 | 263 | |
|
265 | 264 | #------------------------------------------------------------------------- |
|
266 | 265 | # Various stages of Application creation |
|
267 | 266 | #------------------------------------------------------------------------- |
|
268 | 267 | |
|
269 | 268 | def init_crash_handler(self): |
|
270 | 269 | """Create a crash handler, typically setting sys.excepthook to it.""" |
|
271 | 270 | self.crash_handler = self.crash_handler_class(self) |
|
272 | 271 | sys.excepthook = self.excepthook |
|
273 | 272 | def unset_crashhandler(): |
|
274 | 273 | sys.excepthook = sys.__excepthook__ |
|
275 | 274 | atexit.register(unset_crashhandler) |
|
276 | 275 | |
|
277 | 276 | def excepthook(self, etype, evalue, tb): |
|
278 | 277 | """this is sys.excepthook after init_crashhandler |
|
279 | 278 | |
|
280 | 279 | set self.verbose_crash=True to use our full crashhandler, instead of |
|
281 | 280 | a regular traceback with a short message (crash_handler_lite) |
|
282 | 281 | """ |
|
283 | 282 | |
|
284 | 283 | if self.verbose_crash: |
|
285 | 284 | return self.crash_handler(etype, evalue, tb) |
|
286 | 285 | else: |
|
287 | 286 | return crashhandler.crash_handler_lite(etype, evalue, tb) |
|
288 | 287 | |
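The full crash handler chosen by ``excepthook`` above is opt-in via the ``verbose_crash`` trait defined earlier in this class; a sketch of enabling it from a profile's ``ipython_config.py``:

    c = get_config()
    # Use the full crash report instead of crash_handler_lite's short traceback.
    c.BaseIPythonApplication.verbose_crash = True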
|
289 | 288 | @observe('ipython_dir') |
|
290 | 289 | def _ipython_dir_changed(self, change): |
|
291 | 290 | old = change['old'] |
|
292 | 291 | new = change['new'] |
|
293 | 292 | if old is not Undefined: |
|
294 | 293 | str_old = os.path.abspath(old) |
|
295 | 294 | if str_old in sys.path: |
|
296 | 295 | sys.path.remove(str_old) |
|
297 | 296 | if self.add_ipython_dir_to_sys_path: |
|
298 | 297 | str_path = os.path.abspath(new) |
|
299 | 298 | sys.path.append(str_path) |
|
300 | 299 | ensure_dir_exists(new) |
|
301 | 300 | readme = os.path.join(new, "README") |
|
302 | 301 | readme_src = os.path.join( |
|
303 | 302 | get_ipython_package_dir(), "config", "profile", "README" |
|
304 | 303 | ) |
|
305 | 304 | if not os.path.exists(readme) and os.path.exists(readme_src): |
|
306 | 305 | shutil.copy(readme_src, readme) |
|
307 | 306 | for d in ("extensions", "nbextensions"): |
|
308 | 307 | path = os.path.join(new, d) |
|
309 | 308 | try: |
|
310 | 309 | ensure_dir_exists(path) |
|
311 | 310 | except OSError as e: |
|
312 | 311 | # this will not be EEXIST |
|
313 | 312 | self.log.error("couldn't create path %s: %s", path, e) |
|
314 | 313 | self.log.debug("IPYTHONDIR set to: %s" % new) |
|
315 | 314 | |
|
316 | 315 | def load_config_file(self, suppress_errors=IPYTHON_SUPPRESS_CONFIG_ERRORS): |
|
317 | 316 | """Load the config file. |
|
318 | 317 | |
|
319 | 318 | By default, errors in loading config are handled, and a warning |
|
320 | 319 | printed on screen. For testing, the suppress_errors option is set |
|
321 | 320 | to False, so errors will make tests fail. |
|
322 | 321 | |
|
323 | 322 | `suppress_errors` default value is to be `None` in which case the |
|
324 | 323 | behavior default to the one of `traitlets.Application`. |
|
325 | 324 | |
|
326 | 325 | The default value can be set : |
|
327 | 326 | - to `False` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '0', or 'false' (case insensitive). |
|
328 | 327 | - to `True` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '1' or 'true' (case insensitive). |
|
329 | 328 | - to `None` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '' (empty string) or leaving it unset. |
|
330 | 329 | |
|
331 | 330 | Any other value are invalid, and will make IPython exit with a non-zero return code. |
|
332 | 331 | """ |
|
333 | 332 | |
|
334 | 333 | |
|
335 | 334 | self.log.debug("Searching path %s for config files", self.config_file_paths) |
|
336 | 335 | base_config = 'ipython_config.py' |
|
337 | 336 | self.log.debug("Attempting to load config file: %s" % |
|
338 | 337 | base_config) |
|
339 | 338 | try: |
|
340 | 339 | if suppress_errors is not None: |
|
341 | 340 | old_value = Application.raise_config_file_errors |
|
342 | 341 | Application.raise_config_file_errors = not suppress_errors; |
|
343 | 342 | Application.load_config_file( |
|
344 | 343 | self, |
|
345 | 344 | base_config, |
|
346 | 345 | path=self.config_file_paths |
|
347 | 346 | ) |
|
348 | 347 | except ConfigFileNotFound: |
|
349 | 348 | # ignore errors loading parent |
|
350 | 349 | self.log.debug("Config file %s not found", base_config) |
|
351 | 350 | pass |
|
352 | 351 | if suppress_errors is not None: |
|
353 | 352 | Application.raise_config_file_errors = old_value |
|
354 | 353 | |
|
355 | 354 | for config_file_name in self.config_files: |
|
356 | 355 | if not config_file_name or config_file_name == base_config: |
|
357 | 356 | continue |
|
358 | 357 | self.log.debug("Attempting to load config file: %s" % |
|
359 | 358 | self.config_file_name) |
|
360 | 359 | try: |
|
361 | 360 | Application.load_config_file( |
|
362 | 361 | self, |
|
363 | 362 | config_file_name, |
|
364 | 363 | path=self.config_file_paths |
|
365 | 364 | ) |
|
366 | 365 | except ConfigFileNotFound: |
|
367 | 366 | # Only warn if the default config file was NOT being used. |
|
368 | 367 | if config_file_name in self.config_file_specified: |
|
369 | 368 | msg = self.log.warning |
|
370 | 369 | else: |
|
371 | 370 | msg = self.log.debug |
|
372 | 371 | msg("Config file not found, skipping: %s", config_file_name) |
|
373 | 372 | except Exception: |
|
374 | 373 | # For testing purposes. |
|
375 | 374 | if not suppress_errors: |
|
376 | 375 | raise |
|
377 | 376 | self.log.warning("Error loading config file: %s" % |
|
378 | 377 | self.config_file_name, exc_info=True) |
|
379 | 378 | |
|
380 | 379 | def init_profile_dir(self): |
|
381 | 380 | """initialize the profile dir""" |
|
382 | 381 | self._in_init_profile_dir = True |
|
383 | 382 | if self.profile_dir is not None: |
|
384 | 383 | # already ran |
|
385 | 384 | return |
|
386 | 385 | if 'ProfileDir.location' not in self.config: |
|
387 | 386 | # location not specified, find by profile name |
|
388 | 387 | try: |
|
389 | 388 | p = ProfileDir.find_profile_dir_by_name(self.ipython_dir, self.profile, self.config) |
|
390 | 389 | except ProfileDirError: |
|
391 | 390 | # not found, maybe create it (always create default profile) |
|
392 | 391 | if self.auto_create or self.profile == 'default': |
|
393 | 392 | try: |
|
394 | 393 | p = ProfileDir.create_profile_dir_by_name(self.ipython_dir, self.profile, self.config) |
|
395 | 394 | except ProfileDirError: |
|
396 | 395 | self.log.fatal("Could not create profile: %r"%self.profile) |
|
397 | 396 | self.exit(1) |
|
398 | 397 | else: |
|
399 | 398 | self.log.info("Created profile dir: %r"%p.location) |
|
400 | 399 | else: |
|
401 | 400 | self.log.fatal("Profile %r not found."%self.profile) |
|
402 | 401 | self.exit(1) |
|
403 | 402 | else: |
|
404 | 403 | self.log.debug(f"Using existing profile dir: {p.location!r}") |
|
405 | 404 | else: |
|
406 | 405 | location = self.config.ProfileDir.location |
|
407 | 406 | # location is fully specified |
|
408 | 407 | try: |
|
409 | 408 | p = ProfileDir.find_profile_dir(location, self.config) |
|
410 | 409 | except ProfileDirError: |
|
411 | 410 | # not found, maybe create it |
|
412 | 411 | if self.auto_create: |
|
413 | 412 | try: |
|
414 | 413 | p = ProfileDir.create_profile_dir(location, self.config) |
|
415 | 414 | except ProfileDirError: |
|
416 | 415 | self.log.fatal("Could not create profile directory: %r"%location) |
|
417 | 416 | self.exit(1) |
|
418 | 417 | else: |
|
419 | 418 | self.log.debug("Creating new profile dir: %r"%location) |
|
420 | 419 | else: |
|
421 | 420 | self.log.fatal("Profile directory %r not found."%location) |
|
422 | 421 | self.exit(1) |
|
423 | 422 | else: |
|
424 | 423 | self.log.debug(f"Using existing profile dir: {p.location!r}") |
|
425 | 424 | # if profile_dir is specified explicitly, set profile name |
|
426 | 425 | dir_name = os.path.basename(p.location) |
|
427 | 426 | if dir_name.startswith('profile_'): |
|
428 | 427 | self.profile = dir_name[8:] |
|
429 | 428 | |
|
430 | 429 | self.profile_dir = p |
|
431 | 430 | self.config_file_paths.append(p.location) |
|
432 | 431 | self._in_init_profile_dir = False |
|
433 | 432 | |
|
434 | 433 | def init_config_files(self): |
|
435 | 434 | """[optionally] copy default config files into profile dir.""" |
|
436 | 435 | self.config_file_paths.extend(ENV_CONFIG_DIRS) |
|
437 | 436 | self.config_file_paths.extend(SYSTEM_CONFIG_DIRS) |
|
438 | 437 | # copy config files |
|
439 | 438 | path = Path(self.builtin_profile_dir) |
|
440 | 439 | if self.copy_config_files: |
|
441 | 440 | src = self.profile |
|
442 | 441 | |
|
443 | 442 | cfg = self.config_file_name |
|
444 | 443 | if path and (path / cfg).exists(): |
|
445 | 444 | self.log.warning( |
|
446 | 445 | "Staging %r from %s into %r [overwrite=%s]" |
|
447 | 446 | % (cfg, src, self.profile_dir.location, self.overwrite) |
|
448 | 447 | ) |
|
449 | 448 | self.profile_dir.copy_config_file(cfg, path=path, overwrite=self.overwrite) |
|
450 | 449 | else: |
|
451 | 450 | self.stage_default_config_file() |
|
452 | 451 | else: |
|
453 | 452 | # Still stage *bundled* config files, but not generated ones |
|
454 | 453 | # This is necessary for `ipython profile=sympy` to load the profile |
|
455 | 454 | # on the first go |
|
456 | 455 | files = path.glob("*.py") |
|
457 | 456 | for fullpath in files: |
|
458 | 457 | cfg = fullpath.name |
|
459 | 458 | if self.profile_dir.copy_config_file(cfg, path=path, overwrite=False): |
|
460 | 459 | # file was copied |
|
461 | 460 | self.log.warning("Staging bundled %s from %s into %r"%( |
|
462 | 461 | cfg, self.profile, self.profile_dir.location) |
|
463 | 462 | ) |
|
464 | 463 | |
|
465 | 464 | |
|
466 | 465 | def stage_default_config_file(self): |
|
467 | 466 | """auto generate default config file, and stage it into the profile.""" |
|
468 | 467 | s = self.generate_config_file() |
|
469 | 468 | config_file = Path(self.profile_dir.location) / self.config_file_name |
|
470 | 469 | if self.overwrite or not config_file.exists(): |
|
471 | 470 | self.log.warning("Generating default config file: %r" % (config_file)) |
|
472 | 471 | config_file.write_text(s, encoding="utf-8") |
|
473 | 472 | |
|
474 | 473 | @catch_config_error |
|
475 | 474 | def initialize(self, argv=None): |
|
476 | 475 | # don't hook up crash handler before parsing command-line |
|
477 | 476 | self.parse_command_line(argv) |
|
478 | 477 | self.init_crash_handler() |
|
479 | 478 | if self.subapp is not None: |
|
480 | 479 | # stop here if subapp is taking over |
|
481 | 480 | return |
|
482 | 481 | # save a copy of CLI config to re-load after config files |
|
483 | 482 | # so that it has highest priority |
|
484 | 483 | cl_config = deepcopy(self.config) |
|
485 | 484 | self.init_profile_dir() |
|
486 | 485 | self.init_config_files() |
|
487 | 486 | self.load_config_file() |
|
488 | 487 | # enforce cl-opts override configfile opts: |
|
489 | 488 | self.update_config(cl_config) |
@@ -1,2977 +1,3322 b'' | |||
|
1 | 1 | """Completion for IPython. |
|
2 | 2 | |
|
3 | 3 | This module started as fork of the rlcompleter module in the Python standard |
|
4 | 4 | library. The original enhancements made to rlcompleter have been sent |
|
5 | 5 | upstream and were accepted as of Python 2.3, |
|
6 | 6 | |
|
7 | 7 | This module now support a wide variety of completion mechanism both available |
|
8 | 8 | for normal classic Python code, as well as completer for IPython specific |
|
9 | 9 | Syntax like magics. |
|
10 | 10 | |
|
11 | 11 | Latex and Unicode completion |
|
12 | 12 | ============================ |
|
13 | 13 | |
|
14 | 14 | IPython and compatible frontends not only can complete your code, but can help |
|
15 | 15 | you to input a wide range of characters. In particular we allow you to insert |
|
16 | 16 | a unicode character using the tab completion mechanism. |
|
17 | 17 | |
|
18 | 18 | Forward latex/unicode completion |
|
19 | 19 | -------------------------------- |
|
20 | 20 | |
|
21 | 21 | Forward completion allows you to easily type a unicode character using its latex |
|
22 | 22 | name, or unicode long description. To do so type a backslash follow by the |
|
23 | 23 | relevant name and press tab: |
|
24 | 24 | |
|
25 | 25 | |
|
26 | 26 | Using latex completion: |
|
27 | 27 | |
|
28 | 28 | .. code:: |
|
29 | 29 | |
|
30 | 30 | \\alpha<tab> |
|
31 | 31 | α |
|
32 | 32 | |
|
33 | 33 | or using unicode completion: |
|
34 | 34 | |
|
35 | 35 | |
|
36 | 36 | .. code:: |
|
37 | 37 | |
|
38 | 38 | \\GREEK SMALL LETTER ALPHA<tab> |
|
39 | 39 | α |
|
40 | 40 | |
|
41 | 41 | |
|
42 | 42 | Only valid Python identifiers will complete. Combining characters (like arrow or |
|
43 | 43 | dots) are also available, unlike latex they need to be put after the their |
|
44 | 44 | counterpart that is to say, ``F\\\\vec<tab>`` is correct, not ``\\\\vec<tab>F``. |
|
45 | 45 | |
|
46 | 46 | Some browsers are known to display combining characters incorrectly. |
|
47 | 47 | |
|
48 | 48 | Backward latex completion |
|
49 | 49 | ------------------------- |
|
50 | 50 | |
|
51 | 51 | It is sometime challenging to know how to type a character, if you are using |
|
52 | 52 | IPython, or any compatible frontend you can prepend backslash to the character |
|
53 | and press ``<tab>`` to expand it to its latex form. |
53 | and press :kbd:`Tab` to expand it to its latex form. |
|
54 | 54 | |
|
55 | 55 | .. code:: |
|
56 | 56 | |
|
57 | 57 | \\α<tab> |
|
58 | 58 | \\alpha |
|
59 | 59 | |
|
60 | 60 | |
|
61 | 61 | Both forward and backward completions can be deactivated by setting the |
|
62 | ``Completer.backslash_combining_completions`` option to ``False``. |
62 | :std:configtrait:`Completer.backslash_combining_completions` option to |
63 | ``False``. |
|
63 | 64 | |
|
64 | 65 | |
|
65 | 66 | Experimental |
|
66 | 67 | ============ |
|
67 | 68 | |
|
68 | 69 | Starting with IPython 6.0, this module can make use of the Jedi library to |
|
69 | 70 | generate completions both using static analysis of the code, and dynamically |
|
70 | 71 | inspecting multiple namespaces. Jedi is an autocompletion and static analysis |
|
71 | 72 | for Python. The APIs attached to this new mechanism is unstable and will |
|
72 | 73 | raise unless use in an :any:`provisionalcompleter` context manager. |
|
73 | 74 | |
|
74 | 75 | You will find that the following are experimental: |
|
75 | 76 | |
|
76 | 77 | - :any:`provisionalcompleter` |
|
77 | 78 | - :any:`IPCompleter.completions` |
|
78 | 79 | - :any:`Completion` |
|
79 | 80 | - :any:`rectify_completions` |
|
80 | 81 | |
|
81 | 82 | .. note:: |
|
82 | 83 | |
|
83 | 84 | better name for :any:`rectify_completions` ? |
|
84 | 85 | |
|
85 | 86 | We welcome any feedback on these new API, and we also encourage you to try this |
|
86 | 87 | module in debug mode (start IPython with ``--Completer.debug=True``) in order |
|
87 | 88 | to have extra logging information if :any:`jedi` is crashing, or if current |
|
88 | 89 | IPython completer pending deprecations are returning results not yet handled |
|
89 | 90 | by :any:`jedi` |
|
90 | 91 | |
|
91 | 92 | Using Jedi for tab completion allow snippets like the following to work without |
|
92 | 93 | having to execute any code: |
|
93 | 94 | |
|
94 | 95 | >>> myvar = ['hello', 42] |
|
95 | 96 | ... myvar[1].bi<tab> |
|
96 | 97 | |
|
97 | 98 | Tab completion will be able to infer that ``myvar[1]`` is a real number without |
|
98 | executing any code unlike the ``IPCompleter.greedy`` |
99 | executing almost any code unlike the deprecated :any:`IPCompleter.greedy` |
|
99 | 100 | option. |
|
100 | 101 | |
|
101 | 102 | Be sure to update :any:`jedi` to the latest stable version or to try the |
|
102 | 103 | current development version to get better completions. |
|
103 | 104 | |
|
104 | 105 | Matchers |
|
105 | 106 | ======== |
|
106 | 107 | |
|
107 | 108 | All completions routines are implemented using unified *Matchers* API. |
|
108 | 109 | The matchers API is provisional and subject to change without notice. |
|
109 | 110 | |
|
110 | 111 | The built-in matchers include: |
|
111 | 112 | |
|
112 | 113 | - :any:`IPCompleter.dict_key_matcher`: dictionary key completions, |
|
113 | 114 | - :any:`IPCompleter.magic_matcher`: completions for magics, |
|
114 | 115 | - :any:`IPCompleter.unicode_name_matcher`, |
|
115 | 116 | :any:`IPCompleter.fwd_unicode_matcher` |
|
116 | 117 | and :any:`IPCompleter.latex_name_matcher`: see `Forward latex/unicode completion`_, |
|
117 | 118 | - :any:`back_unicode_name_matcher` and :any:`back_latex_name_matcher`: see `Backward latex completion`_, |
|
118 | 119 | - :any:`IPCompleter.file_matcher`: paths to files and directories, |
|
119 | 120 | - :any:`IPCompleter.python_func_kw_matcher` - function keywords, |
|
120 | 121 | - :any:`IPCompleter.python_matches` - globals and attributes (v1 API), |
|
121 | 122 | - ``IPCompleter.jedi_matcher`` - static analysis with Jedi, |
|
122 | 123 | - :any:`IPCompleter.custom_completer_matcher` - pluggable completer with a default |
|
123 | 124 | implementation in :any:`InteractiveShell` which uses IPython hooks system |
|
124 | 125 | (`complete_command`) with string dispatch (including regular expressions). |
|
125 | 126 | Differently to other matchers, ``custom_completer_matcher`` will not suppress |
|
126 | 127 | Jedi results to match behaviour in earlier IPython versions. |
|
127 | 128 | |
|
128 | 129 | Custom matchers can be added by appending to ``IPCompleter.custom_matchers`` list. |
|
129 | 130 | |
|
130 | 131 | Matcher API |
|
131 | 132 | ----------- |
|
132 | 133 | |
|
133 | 134 | Simplifying some details, the ``Matcher`` interface can described as |
|
134 | 135 | |
|
135 | 136 | .. code-block:: |
|
136 | 137 | |
|
137 | 138 | MatcherAPIv1 = Callable[[str], list[str]] |
|
138 | 139 | MatcherAPIv2 = Callable[[CompletionContext], SimpleMatcherResult] |
|
139 | 140 | |
|
140 | 141 | Matcher = MatcherAPIv1 | MatcherAPIv2 |
|
141 | 142 | |
|
142 | 143 | The ``MatcherAPIv1`` reflects the matcher API as available prior to IPython 8.6.0 |
|
143 | 144 | and remains supported as a simplest way for generating completions. This is also |
|
144 | 145 | currently the only API supported by the IPython hooks system `complete_command`. |
|
145 | 146 | |
|
146 | 147 | To distinguish between matcher versions ``matcher_api_version`` attribute is used. |
|
147 | 148 | More precisely, the API allows to omit ``matcher_api_version`` for v1 Matchers, |
|
148 | 149 | and requires a literal ``2`` for v2 Matchers. |
|
149 | 150 | |
|
150 | 151 | Once the API stabilises future versions may relax the requirement for specifying |
|
151 | 152 | ``matcher_api_version`` by switching to :any:`functools.singledispatch`, therefore |
|
152 | 153 | please do not rely on the presence of ``matcher_api_version`` for any purposes. |
|
153 | 154 | |
|
154 | 155 | Suppression of competing matchers |
|
155 | 156 | --------------------------------- |
|
156 | 157 | |
|
157 | 158 | By default results from all matchers are combined, in the order determined by |
|
158 | 159 | their priority. Matchers can request to suppress results from subsequent |
|
159 | 160 | matchers by setting ``suppress`` to ``True`` in the ``MatcherResult``. |
|
160 | 161 | |
|
161 | 162 | When multiple matchers simultaneously request surpression, the results from of |
|
162 | 163 | the matcher with higher priority will be returned. |
|
163 | 164 | |
|
164 | 165 | Sometimes it is desirable to suppress most but not all other matchers; |
|
165 | 166 | this can be achieved by adding a list of identifiers of matchers which |
|
166 | 167 | should not be suppressed to ``MatcherResult`` under ``do_not_suppress`` key. |
|
167 | 168 | |
|
168 | 169 | The suppression behaviour can is user-configurable via |
|
169 | :any:`IPCompleter.suppress_competing_matchers`. |
170 | :std:configtrait:`IPCompleter.suppress_competing_matchers`. |
|
170 | 171 | """ |
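To make the Matcher API v2 description in the docstring above concrete, here is a minimal sketch of a custom v2 matcher registered at runtime. The matcher name and the environment-variable completion logic are purely illustrative; only ``CompletionContext``, ``SimpleCompletion``, ``SimpleMatcherResult``, the ``matcher_api_version`` attribute and ``IPCompleter.custom_matchers`` come from the provisional API described above:

    import os
    from IPython import get_ipython
    from IPython.core.completer import (
        CompletionContext,
        SimpleCompletion,
        SimpleMatcherResult,
    )

    def env_var_matcher(context: CompletionContext) -> SimpleMatcherResult:
        """Complete ``$NAME`` tokens from os.environ (illustrative only)."""
        token = context.token
        if not token.startswith("$"):
            return {"completions": []}
        names = sorted(n for n in os.environ if n.startswith(token[1:]))
        return {
            "completions": [SimpleCompletion(text="$" + n, type="env var") for n in names],
            "suppress": False,
        }

    env_var_matcher.matcher_api_version = 2   # mark this callable as a v2 matcher

    ip = get_ipython()
    if ip is not None:
        ip.Completer.custom_matchers.append(env_var_matcher)

Returning ``"suppress": False`` keeps results from the other matchers, matching the default combination behaviour described under *Suppression of competing matchers*.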
|
171 | 172 | |
|
172 | 173 | |
|
173 | 174 | # Copyright (c) IPython Development Team. |
|
174 | 175 | # Distributed under the terms of the Modified BSD License. |
|
175 | 176 | # |
|
176 | 177 | # Some of this code originated from rlcompleter in the Python standard library |
|
177 | 178 | # Copyright (C) 2001 Python Software Foundation, www.python.org |
|
178 | 179 | |
|
179 | 180 | from __future__ import annotations |
|
180 | 181 | import builtins as builtin_mod |
|
182 | import enum | |
|
181 | 183 | import glob |
|
182 | 184 | import inspect |
|
183 | 185 | import itertools |
|
184 | 186 | import keyword |
|
185 | 187 | import os |
|
186 | 188 | import re |
|
187 | 189 | import string |
|
188 | 190 | import sys |
|
191 | import tokenize | |
|
189 | 192 | import time |
|
190 | 193 | import unicodedata |
|
191 | 194 | import uuid |
|
192 | 195 | import warnings |
|
196 | from ast import literal_eval | |
|
197 | from collections import defaultdict | |
|
193 | 198 | from contextlib import contextmanager |
|
194 | 199 | from dataclasses import dataclass |
|
195 | 200 | from functools import cached_property, partial |
|
196 | from importlib import import_module | |
|
197 | 201 | from types import SimpleNamespace |
|
198 | 202 | from typing import ( |
|
199 | 203 | Iterable, |
|
200 | 204 | Iterator, |
|
201 | 205 | List, |
|
202 | 206 | Tuple, |
|
203 | 207 | Union, |
|
204 | 208 | Any, |
|
205 | 209 | Sequence, |
|
206 | 210 | Dict, |
|
207 | NamedTuple, | |
|
208 | Pattern, | |
|
209 | 211 | Optional, |
|
210 | 212 | TYPE_CHECKING, |
|
211 | 213 | Set, |
|
214 | Sized, | |
|
215 | TypeVar, | |
|
212 | 216 | Literal, |
|
213 | 217 | ) |
|
214 | 218 | |
|
219 | from IPython.core.guarded_eval import guarded_eval, EvaluationContext | |
|
215 | 220 | from IPython.core.error import TryNext |
|
216 | 221 | from IPython.core.inputtransformer2 import ESC_MAGIC |
|
217 | 222 | from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol |
|
218 | 223 | from IPython.core.oinspect import InspectColors |
|
219 | 224 | from IPython.testing.skipdoctest import skip_doctest |
|
220 | 225 | from IPython.utils import generics |
|
221 | 226 | from IPython.utils.decorators import sphinx_options |
|
222 | 227 | from IPython.utils.dir2 import dir2, get_real_method |
|
223 | 228 | from IPython.utils.docs import GENERATING_DOCUMENTATION |
|
224 | 229 | from IPython.utils.path import ensure_dir_exists |
|
225 | 230 | from IPython.utils.process import arg_split |
|
226 | 231 | from traitlets import ( |
|
227 | 232 | Bool, |
|
228 | 233 | Enum, |
|
229 | 234 | Int, |
|
230 | 235 | List as ListTrait, |
|
231 | 236 | Unicode, |
|
232 | 237 | Dict as DictTrait, |
|
233 | 238 | Union as UnionTrait, |
|
234 | default, | |
|
235 | 239 | observe, |
|
236 | 240 | ) |
|
237 | 241 | from traitlets.config.configurable import Configurable |
|
238 | 242 | |
|
239 | 243 | import __main__ |
|
240 | 244 | |
|
241 | 245 | # skip module docstests |
|
242 | 246 | __skip_doctest__ = True |
|
243 | 247 | |
|
244 | 248 | |
|
245 | 249 | try: |
|
246 | 250 | import jedi |
|
247 | 251 | jedi.settings.case_insensitive_completion = False |
|
248 | 252 | import jedi.api.helpers |
|
249 | 253 | import jedi.api.classes |
|
250 | 254 | JEDI_INSTALLED = True |
|
251 | 255 | except ImportError: |
|
252 | 256 | JEDI_INSTALLED = False |
|
253 | 257 | |
|
254 | 258 | |
|
255 | if TYPE_CHECKING or GENERATING_DOCUMENTATION: | |
|
259 | if TYPE_CHECKING or GENERATING_DOCUMENTATION and sys.version_info >= (3, 11): | |
|
256 | 260 | from typing import cast |
|
257 | from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias | |
|
261 | from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias, TypeGuard | |
|
258 | 262 | else: |
|
263 | from typing import Generic | |
|
259 | 264 | |
|
260 | def cast(obj, type_): |
265 | def cast(type_, obj): |
|
261 | 266 | """Workaround for `TypeError: MatcherAPIv2() takes no arguments`""" |
|
262 | 267 | return obj |
|
263 | 268 | |
|
264 | 269 | # do not require on runtime |
|
265 | 270 | NotRequired = Tuple # requires Python >=3.11 |
|
266 | 271 | TypedDict = Dict # by extension of `NotRequired` requires 3.11 too |
|
267 | 272 | Protocol = object # requires Python >=3.8 |
|
268 | 273 | TypeAlias = Any # requires Python >=3.10 |
|
274 | TypeGuard = Generic # requires Python >=3.10 | |
|
269 | 275 | if GENERATING_DOCUMENTATION: |
|
270 | 276 | from typing import TypedDict |
|
271 | 277 | |
|
272 | 278 | # ----------------------------------------------------------------------------- |
|
273 | 279 | # Globals |
|
274 | 280 | #----------------------------------------------------------------------------- |
|
275 | 281 | |
|
276 | 282 | # ranges where we have most of the valid unicode names. We could be more finer |
|
277 | 283 | # grained but is it worth it for performance While unicode have character in the |
|
278 | 284 | # range 0, 0x110000, we seem to have name for about 10% of those. (131808 as I |
|
279 | 285 | # write this). With below range we cover them all, with a density of ~67% |
|
280 | 286 | # biggest next gap we consider only adds up about 1% density and there are 600 |
|
281 | 287 | # gaps that would need hard coding. |
|
282 | _UNICODE_RANGES = [(32, 0x3 |
288 | _UNICODE_RANGES = [(32, 0x323B0), (0xE0001, 0xE01F0)] |
|
283 | 289 | |
|
284 | 290 | # Public API |
|
285 | 291 | __all__ = ["Completer", "IPCompleter"] |
|
286 | 292 | |
|
287 | 293 | if sys.platform == 'win32': |
|
288 | 294 | PROTECTABLES = ' ' |
|
289 | 295 | else: |
|
290 | 296 | PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&' |
|
291 | 297 | |
|
292 | 298 | # Protect against returning an enormous number of completions which the frontend |
|
293 | 299 | # may have trouble processing. |
|
294 | 300 | MATCHES_LIMIT = 500 |
|
295 | 301 | |
|
296 | 302 | # Completion type reported when no type can be inferred. |
|
297 | 303 | _UNKNOWN_TYPE = "<unknown>" |
|
298 | 304 | |
|
305 | # sentinel value to signal lack of a match | |
|
306 | not_found = object() | |
|
307 | ||
|
299 | 308 | class ProvisionalCompleterWarning(FutureWarning): |
|
300 | 309 | """ |
|
301 | 310 | Exception raise by an experimental feature in this module. |
|
302 | 311 | |
|
303 | 312 | Wrap code in :any:`provisionalcompleter` context manager if you |
|
304 | 313 | are certain you want to use an unstable feature. |
|
305 | 314 | """ |
|
306 | 315 | pass |
|
307 | 316 | |
|
308 | 317 | warnings.filterwarnings('error', category=ProvisionalCompleterWarning) |
|
309 | 318 | |
|
310 | 319 | |
|
311 | 320 | @skip_doctest |
|
312 | 321 | @contextmanager |
|
313 | 322 | def provisionalcompleter(action='ignore'): |
|
314 | 323 | """ |
|
315 | 324 | This context manager has to be used in any place where unstable completer |
|
316 | 325 | behavior and API may be called. |
|
317 | 326 | |
|
318 | 327 | >>> with provisionalcompleter(): |
|
319 | 328 | ... completer.do_experimental_things() # works |
|
320 | 329 | |
|
321 | 330 | >>> completer.do_experimental_things() # raises. |
|
322 | 331 | |
|
323 | 332 | .. note:: |
|
324 | 333 | |
|
325 | 334 | Unstable |
|
326 | 335 | |
|
327 | 336 | By using this context manager you agree that the API in use may change |
|
328 | 337 | without warning, and that you won't complain if they do so. |
|
329 | 338 | |
|
330 | 339 | You also understand that, if the API is not to your liking, you should report |
|
331 | 340 | a bug to explain your use case upstream. |
|
332 | 341 | |
|
333 | 342 | We'll be happy to get your feedback, feature requests, and improvements on |
|
334 | 343 | any of the unstable APIs! |
|
335 | 344 | """ |
|
336 | 345 | with warnings.catch_warnings(): |
|
337 | 346 | warnings.filterwarnings(action, category=ProvisionalCompleterWarning) |
|
338 | 347 | yield |
|
339 | 348 | |
|
340 | 349 | |
|
341 | 350 | def has_open_quotes(s): |
|
342 | 351 | """Return whether a string has open quotes. |
|
343 | 352 | |
|
344 | 353 | This simply counts whether the number of quote characters of either type in |
|
345 | 354 | the string is odd. |
|
346 | 355 | |
|
347 | 356 | Returns |
|
348 | 357 | ------- |
|
349 | 358 | If there is an open quote, the quote character is returned. Else, return |
|
350 | 359 | False. |
|
351 | 360 | """ |
|
352 | 361 | # We check " first, then ', so complex cases with nested quotes will get |
|
353 | 362 | # the " to take precedence. |
|
354 | 363 | if s.count('"') % 2: |
|
355 | 364 | return '"' |
|
356 | 365 | elif s.count("'") % 2: |
|
357 | 366 | return "'" |
|
358 | 367 | else: |
|
359 | 368 | return False |
|
360 | 369 | |
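A few illustrative calls showing the behaviour just described (an odd count of ``"`` takes precedence over an odd count of ``'``):

    has_open_quotes('print("hello')    # -> '"'
    has_open_quotes("it's fine")       # -> "'"
    has_open_quotes('print("hi")')     # -> False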
|
361 | 370 | |
|
362 | 371 | def protect_filename(s, protectables=PROTECTABLES): |
|
363 | 372 | """Escape a string to protect certain characters.""" |
|
364 | 373 | if set(s) & set(protectables): |
|
365 | 374 | if sys.platform == "win32": |
|
366 | 375 | return '"' + s + '"' |
|
367 | 376 | else: |
|
368 | 377 | return "".join(("\\" + c if c in protectables else c) for c in s) |
|
369 | 378 | else: |
|
370 | 379 | return s |
|
371 | 380 | |
|
372 | 381 | |
|
373 | 382 | def expand_user(path:str) -> Tuple[str, bool, str]: |
|
374 | 383 | """Expand ``~``-style usernames in strings. |
|
375 | 384 | |
|
376 | 385 | This is similar to :func:`os.path.expanduser`, but it computes and returns |
|
377 | 386 | extra information that will be useful if the input was being used in |
|
378 | 387 | computing completions, and you wish to return the completions with the |
|
379 | 388 | original '~' instead of its expanded value. |
|
380 | 389 | |
|
381 | 390 | Parameters |
|
382 | 391 | ---------- |
|
383 | 392 | path : str |
|
384 | 393 | String to be expanded. If no ~ is present, the output is the same as the |
|
385 | 394 | input. |
|
386 | 395 | |
|
387 | 396 | Returns |
|
388 | 397 | ------- |
|
389 | 398 | newpath : str |
|
390 | 399 | Result of ~ expansion in the input path. |
|
391 | 400 | tilde_expand : bool |
|
392 | 401 | Whether any expansion was performed or not. |
|
393 | 402 | tilde_val : str |
|
394 | 403 | The value that ~ was replaced with. |
|
395 | 404 | """ |
|
396 | 405 | # Default values |
|
397 | 406 | tilde_expand = False |
|
398 | 407 | tilde_val = '' |
|
399 | 408 | newpath = path |
|
400 | 409 | |
|
401 | 410 | if path.startswith('~'): |
|
402 | 411 | tilde_expand = True |
|
403 | 412 | rest = len(path)-1 |
|
404 | 413 | newpath = os.path.expanduser(path) |
|
405 | 414 | if rest: |
|
406 | 415 | tilde_val = newpath[:-rest] |
|
407 | 416 | else: |
|
408 | 417 | tilde_val = newpath |
|
409 | 418 | |
|
410 | 419 | return newpath, tilde_expand, tilde_val |
|
411 | 420 | |
|
412 | 421 | |
|
413 | 422 | def compress_user(path:str, tilde_expand:bool, tilde_val:str) -> str: |
|
414 | 423 | """Does the opposite of expand_user, with its outputs. |
|
415 | 424 | """ |
|
416 | 425 | if tilde_expand: |
|
417 | 426 | return path.replace(tilde_val, '~') |
|
418 | 427 | else: |
|
419 | 428 | return path |
|
420 | 429 | |
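A sketch of the round trip between the two helpers above, assuming the current user's home directory is ``/home/jo`` (purely illustrative):

    newpath, tilde_expand, tilde_val = expand_user("~/notebooks")
    # newpath      == "/home/jo/notebooks"
    # tilde_expand is True
    # tilde_val    == "/home/jo"
    compress_user(newpath, tilde_expand, tilde_val)   # -> "~/notebooks"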
|
421 | 430 | |
|
422 | 431 | def completions_sorting_key(word): |
|
423 | 432 | """key for sorting completions |
|
424 | 433 | |
|
425 | 434 | This does several things: |
|
426 | 435 | |
|
427 | 436 | - Demote any completions starting with underscores to the end |
|
428 | 437 | - Insert any %magic and %%cellmagic completions in the alphabetical order |
|
429 | 438 | by their name |
|
430 | 439 | """ |
|
431 | 440 | prio1, prio2 = 0, 0 |
|
432 | 441 | |
|
433 | 442 | if word.startswith('__'): |
|
434 | 443 | prio1 = 2 |
|
435 | 444 | elif word.startswith('_'): |
|
436 | 445 | prio1 = 1 |
|
437 | 446 | |
|
438 | 447 | if word.endswith('='): |
|
439 | 448 | prio1 = -1 |
|
440 | 449 | |
|
441 | 450 | if word.startswith('%%'): |
|
442 | 451 | # If there's another % in there, this is something else, so leave it alone |
|
443 | 452 | if not "%" in word[2:]: |
|
444 | 453 | word = word[2:] |
|
445 | 454 | prio2 = 2 |
|
446 | 455 | elif word.startswith('%'): |
|
447 | 456 | if not "%" in word[1:]: |
|
448 | 457 | word = word[1:] |
|
449 | 458 | prio2 = 1 |
|
450 | 459 | |
|
451 | 460 | return prio1, word, prio2 |
|
452 | 461 | |
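The ordering rules listed in the docstring above, applied to a small illustrative list:

    words = ["_private", "__dunder", "zeta", "%time", "%%timeit", "alpha"]
    sorted(words, key=completions_sorting_key)
    # -> ['alpha', '%time', '%%timeit', 'zeta', '_private', '__dunder']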
|
453 | 462 | |
|
454 | 463 | class _FakeJediCompletion: |
|
455 | 464 | """ |
|
456 | 465 | This is a workaround to communicate to the UI that Jedi has crashed and to |
|
457 | 466 | report a bug. Will be used only id :any:`IPCompleter.debug` is set to true. |
|
458 | 467 | |
|
459 | 468 | Added in IPython 6.0 so should likely be removed for 7.0 |
|
460 | 469 | |
|
461 | 470 | """ |
|
462 | 471 | |
|
463 | 472 | def __init__(self, name): |
|
464 | 473 | |
|
465 | 474 | self.name = name |
|
466 | 475 | self.complete = name |
|
467 | 476 | self.type = 'crashed' |
|
468 | 477 | self.name_with_symbols = name |
|
469 | self.signature = '' |
470 | self._origin = 'fake' |
478 | self.signature = "" |
479 | self._origin = "fake" |
480 | self.text = "crashed" |
|
471 | 481 | |
|
472 | 482 | def __repr__(self): |
|
473 | 483 | return '<Fake completion object jedi has crashed>' |
|
474 | 484 | |
|
475 | 485 | |
|
476 | 486 | _JediCompletionLike = Union[jedi.api.Completion, _FakeJediCompletion] |
|
477 | 487 | |
|
478 | 488 | |
|
479 | 489 | class Completion: |
|
480 | 490 | """ |
|
481 | 491 | Completion object used and returned by IPython completers. |
|
482 | 492 | |
|
483 | 493 | .. warning:: |
|
484 | 494 | |
|
485 | 495 | Unstable |
|
486 | 496 | |
|
487 | 497 | This function is unstable, API may change without warning. |
|
488 | 498 | It will also raise unless use in proper context manager. |
|
489 | 499 | |
|
490 | 500 | This act as a middle ground :any:`Completion` object between the |
|
491 | 501 | :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion |
|
492 | 502 | object. While Jedi need a lot of information about evaluator and how the |
|
493 | 503 | code should be ran/inspected, PromptToolkit (and other frontend) mostly |
|
494 | 504 | need user facing information. |
|
495 | 505 | |
|
496 | 506 | - Which range should be replaced replaced by what. |
|
497 | 507 | - Some metadata (like completion type), or meta information to displayed to |
|
498 | 508 | the use user. |
|
499 | 509 | |
|
500 | 510 | For debugging purpose we can also store the origin of the completion (``jedi``, |
|
501 | 511 | ``IPython.python_matches``, ``IPython.magics_matches``...). |
|
502 | 512 | """ |
|
503 | 513 | |
|
504 | 514 | __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin'] |
|
505 | 515 | |
|
506 | def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin='', signature='') -> None: | |
|
507 | warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). " | |
|
516 | def __init__( | |
|
517 | self, | |
|
518 | start: int, | |
|
519 | end: int, | |
|
520 | text: str, | |
|
521 | *, | |
|
522 | type: Optional[str] = None, | |
|
523 | _origin="", | |
|
524 | signature="", | |
|
525 | ) -> None: | |
|
526 | warnings.warn( | |
|
527 | "``Completion`` is a provisional API (as of IPython 6.0). " | |
|
508 | 528 | "It may change without warnings. " |
509 | 529 | "Use in corresponding context manager.", |
510 | category=ProvisionalCompleterWarning, stacklevel=2) |
530 | category=ProvisionalCompleterWarning, | |
|
531 | stacklevel=2, | |
|
532 | ) | |
|
511 | 533 | |
|
512 | 534 | self.start = start |
|
513 | 535 | self.end = end |
|
514 | 536 | self.text = text |
|
515 | 537 | self.type = type |
|
516 | 538 | self.signature = signature |
|
517 | 539 | self._origin = _origin |
|
518 | 540 | |
|
519 | 541 | def __repr__(self): |
|
520 | 542 | return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \ |
|
521 | 543 | (self.start, self.end, self.text, self.type or '?', self.signature or '?') |
|
522 | 544 | |
|
523 | def __eq__(self, other)->Bool: |
545 | def __eq__(self, other) -> bool: |
|
524 | 546 | """ |
|
525 | 547 | Equality and hash do not hash the type (as some completer may not be |
|
526 | 548 | able to infer the type), but are use to (partially) de-duplicate |
|
527 | 549 | completion. |
|
528 | 550 | |
|
529 | 551 | Completely de-duplicating completion is a bit tricker that just |
|
530 | 552 | comparing as it depends on surrounding text, which Completions are not |
|
531 | 553 | aware of. |
|
532 | 554 | """ |
|
533 | 555 | return self.start == other.start and \ |
|
534 | 556 | self.end == other.end and \ |
|
535 | 557 | self.text == other.text |
|
536 | 558 | |
|
537 | 559 | def __hash__(self): |
|
538 | 560 | return hash((self.start, self.end, self.text)) |
|
539 | 561 | |
|
540 | 562 | |
|
541 | 563 | class SimpleCompletion: |
|
542 | 564 | """Completion item to be included in the dictionary returned by new-style Matcher (API v2). |
|
543 | 565 | |
|
544 | 566 | .. warning:: |
|
545 | 567 | |
|
546 | 568 | Provisional |
|
547 | 569 | |
|
548 | 570 | This class is used to describe the currently supported attributes of |
|
549 | 571 | simple completion items, and any additional implementation details |
|
550 | 572 | should not be relied on. Additional attributes may be included in |
|
551 | 573 | future versions, and meaning of text disambiguated from the current |
|
552 | 574 | dual meaning of "text to insert" and "text to used as a label". |
|
553 | 575 | """ |
|
554 | 576 | |
|
555 | 577 | __slots__ = ["text", "type"] |
|
556 | 578 | |
|
557 | def __init__(self, text: str, *, type: str = None): | |
|
579 | def __init__(self, text: str, *, type: Optional[str] = None): | |
|
558 | 580 | self.text = text |
|
559 | 581 | self.type = type |
|
560 | 582 | |
|
561 | 583 | def __repr__(self): |
|
562 | 584 | return f"<SimpleCompletion text={self.text!r} type={self.type!r}>" |
|
563 | 585 | |
|
564 | 586 | |
|
565 | 587 | class _MatcherResultBase(TypedDict): |
|
566 | 588 | """Definition of dictionary to be returned by new-style Matcher (API v2).""" |
|
567 | 589 | |
|
568 | 590 | #: Suffix of the provided ``CompletionContext.token``, if not given defaults to full token. |
|
569 | 591 | matched_fragment: NotRequired[str] |
|
570 | 592 | |
|
571 | 593 | #: Whether to suppress results from all other matchers (True), some |
|
572 | 594 | #: matchers (set of identifiers) or none (False); default is False. |
|
573 | 595 | suppress: NotRequired[Union[bool, Set[str]]] |
|
574 | 596 | |
|
575 | 597 | #: Identifiers of matchers which should NOT be suppressed when this matcher |
|
576 | 598 | #: requests to suppress all other matchers; defaults to an empty set. |
|
577 | 599 | do_not_suppress: NotRequired[Set[str]] |
|
578 | 600 | |
|
579 | 601 | #: Are completions already ordered and should be left as-is? default is False. |
|
580 | 602 | ordered: NotRequired[bool] |
|
581 | 603 | |
|
582 | 604 | |
|
583 | 605 | @sphinx_options(show_inherited_members=True, exclude_inherited_from=["dict"]) |
|
584 | 606 | class SimpleMatcherResult(_MatcherResultBase, TypedDict): |
|
585 | 607 | """Result of new-style completion matcher.""" |
|
586 | 608 | |
|
587 | 609 | # note: TypedDict is added again to the inheritance chain |
|
588 | 610 | # in order to get __orig_bases__ for documentation |
|
589 | 611 | |
|
590 | 612 | #: List of candidate completions |
|
591 | completions: Sequence[SimpleCompletion] | |
|
613 | completions: Sequence[SimpleCompletion] | Iterator[SimpleCompletion] | |
|
592 | 614 | |
|
593 | 615 | |
|
594 | 616 | class _JediMatcherResult(_MatcherResultBase): |
|
595 | 617 | """Matching result returned by Jedi (will be processed differently)""" |
|
596 | 618 | |
|
597 | 619 | #: list of candidate completions |
|
598 | completions: Iterable[_JediCompletionLike] |
620 | completions: Iterator[_JediCompletionLike] |
|
621 | ||
|
622 | ||
|
623 | AnyMatcherCompletion = Union[_JediCompletionLike, SimpleCompletion] | |
|
624 | AnyCompletion = TypeVar("AnyCompletion", AnyMatcherCompletion, Completion) | |
|
599 | 625 | |
|
600 | 626 | |
|
601 | 627 | @dataclass |
|
602 | 628 | class CompletionContext: |
|
603 | 629 | """Completion context provided as an argument to matchers in the Matcher API v2.""" |
|
604 | 630 | |
|
605 | 631 | # rationale: many legacy matchers relied on completer state (`self.text_until_cursor`) |
|
606 | 632 | # which was not explicitly visible as an argument of the matcher, making any refactor |
|
607 | 633 | # prone to errors; by explicitly passing `cursor_position` we can decouple the matchers |
|
608 | 634 | # from the completer, and make substituting them in sub-classes easier. |
|
609 | 635 | |
|
610 | 636 | #: Relevant fragment of code directly preceding the cursor. |
|
611 | 637 | #: The extraction of token is implemented via splitter heuristic |
|
612 | 638 | #: (following readline behaviour for legacy reasons), which is user configurable |
|
613 | 639 | #: (by switching the greedy mode). |
|
614 | 640 | token: str |
|
615 | 641 | |
|
616 | 642 | #: The full available content of the editor or buffer |
|
617 | 643 | full_text: str |
|
618 | 644 | |
|
619 | 645 | #: Cursor position in the line (the same for ``full_text`` and ``text``). |
|
620 | 646 | cursor_position: int |
|
621 | 647 | |
|
622 | 648 | #: Cursor line in ``full_text``. |
|
623 | 649 | cursor_line: int |
|
624 | 650 | |
|
625 | 651 | #: The maximum number of completions that will be used downstream. |
|
626 | 652 | #: Matchers can use this information to abort early. |
|
627 | 653 | #: The built-in Jedi matcher is currently excepted from this limit. |
|
628 | 654 | # If not given, return all possible completions. |
|
629 | 655 | limit: Optional[int] |
|
630 | 656 | |
|
631 | 657 | @cached_property |
|
632 | 658 | def text_until_cursor(self) -> str: |
|
633 | 659 | return self.line_with_cursor[: self.cursor_position] |
|
634 | 660 | |
|
635 | 661 | @cached_property |
|
636 | 662 | def line_with_cursor(self) -> str: |
|
637 | 663 | return self.full_text.split("\n")[self.cursor_line] |
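
A sketch of the values a matcher receives, assuming the buffer holds the single line ``print(foo.ba`` with the cursor at the end (the token shown reflects the splitter behaviour defined later in this file):

    ctx = CompletionContext(
        token="foo.ba",            # fragment extracted by the splitter heuristic
        full_text="print(foo.ba",  # whole buffer content
        cursor_position=12,        # column in the current line
        cursor_line=0,
        limit=None,                # no limit requested downstream
    )
    assert ctx.line_with_cursor == "print(foo.ba"
    assert ctx.text_until_cursor == "print(foo.ba"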
|
638 | 664 | |
|
639 | 665 | |
|
640 | 666 | #: Matcher results for API v2. |
|
641 | 667 | MatcherResult = Union[SimpleMatcherResult, _JediMatcherResult] |
|
642 | 668 | |
|
643 | 669 | |
|
644 | 670 | class _MatcherAPIv1Base(Protocol): |
|
645 | def __call__(self, text: str) -> List[str]: |

671 | def __call__(self, text: str) -> List[str]: | |
|
646 | 672 | """Call signature.""" |
|
673 | ... | |
|
674 | ||
|
675 | #: Used to construct the default matcher identifier | |
|
676 | __qualname__: str | |
|
647 | 677 | |
|
648 | 678 | |
|
649 | 679 | class _MatcherAPIv1Total(_MatcherAPIv1Base, Protocol): |
|
650 | 680 | #: API version |
|
651 | 681 | matcher_api_version: Optional[Literal[1]] |
|
652 | 682 | |
|
653 | def __call__(self, text: str) -> List[str]: |

683 | def __call__(self, text: str) -> List[str]: | |
|
654 | 684 | """Call signature.""" |
|
685 | ... | |
|
655 | 686 | |
|
656 | 687 | |
|
657 | 688 | #: Protocol describing Matcher API v1. |
|
658 | 689 | MatcherAPIv1: TypeAlias = Union[_MatcherAPIv1Base, _MatcherAPIv1Total] |
|
659 | 690 | |
|
660 | 691 | |
|
661 | 692 | class MatcherAPIv2(Protocol): |
|
662 | 693 | """Protocol describing Matcher API v2.""" |
|
663 | 694 | |
|
664 | 695 | #: API version |
|
665 | 696 | matcher_api_version: Literal[2] = 2 |
|
666 | 697 | |
|
667 | 698 | def __call__(self, context: CompletionContext) -> MatcherResult: |
|
668 | 699 | """Call signature.""" |
|
700 | ... | |
|
701 | ||
|
702 | #: Used to construct the default matcher identifier | |
|
703 | __qualname__: str | |
|
669 | 704 | |
|
670 | 705 | |
|
671 | 706 | Matcher: TypeAlias = Union[MatcherAPIv1, MatcherAPIv2] |
|
672 | 707 | |
|
673 | 708 | |
|
709 | def _is_matcher_v1(matcher: Matcher) -> TypeGuard[MatcherAPIv1]: | |
|
710 | api_version = _get_matcher_api_version(matcher) | |
|
711 | return api_version == 1 | |
|
712 | ||
|
713 | ||
|
714 | def _is_matcher_v2(matcher: Matcher) -> TypeGuard[MatcherAPIv2]: | |
|
715 | api_version = _get_matcher_api_version(matcher) | |
|
716 | return api_version == 2 | |
|
717 | ||
|
718 | ||
|
719 | def _is_sizable(value: Any) -> TypeGuard[Sized]: | |
|
720 | """Determines whether objects is sizable""" | |
|
721 | return hasattr(value, "__len__") | |
|
722 | ||
|
723 | ||
|
724 | def _is_iterator(value: Any) -> TypeGuard[Iterator]: | |
|
725 | """Determines whether objects is sizable""" | |
|
726 | return hasattr(value, "__next__") | |
|
727 | ||
|
728 | ||
|
674 | 729 | def has_any_completions(result: MatcherResult) -> bool: |
|
675 | 730 | """Check if any result includes any completions.""" |
|
676 | if hasattr(result["completions"], "__len__"): |

677 | return len(result["completions"]) != 0 | |
|
731 | completions = result["completions"] | |
|
732 | if _is_sizable(completions): | |
|
733 | return len(completions) != 0 | |
|
734 | if _is_iterator(completions): | |
|
678 | 735 | try: |
|
679 | old_iterator = result["completions"] |

736 | old_iterator = completions | |
|
680 | 737 | first = next(old_iterator) |
|
681 | result["completions"] = itertools.chain([first], old_iterator) |

738 | result["completions"] = cast( | |
|
739 | Iterator[SimpleCompletion], | |
|
740 | itertools.chain([first], old_iterator), | |
|
741 | ) | |
|
682 | 742 | return True |
|
683 | 743 | except StopIteration: |
|
684 | 744 | return False |
|
745 | raise ValueError( | |
|
746 | "Completions returned by matcher need to be an Iterator or a Sizable" | |
|
747 | ) | |
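
The iterator branch above peeks at one element and then stitches it back so downstream consumers still see every completion; the same pattern in isolation (a sketch, not part of the patch):

    import itertools

    def _peek_nonempty(iterator):
        # Consume a single element to test for emptiness, then chain it back
        # in front of the remaining items so nothing is lost.
        try:
            first = next(iterator)
        except StopIteration:
            return False, iter(())
        return True, itertools.chain([first], iterator)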
|
685 | 748 | |
|
686 | 749 | |
|
687 | 750 | def completion_matcher( |
|
688 | *, priority: float = None, identifier: str = None, api_version: int = 1 | |
|
751 | *, | |
|
752 | priority: Optional[float] = None, | |
|
753 | identifier: Optional[str] = None, | |
|
754 | api_version: int = 1, | |
|
689 | 755 | ): |
|
690 | 756 | """Adds attributes describing the matcher. |
|
691 | 757 | |
|
692 | 758 | Parameters |
|
693 | 759 | ---------- |
|
694 | 760 | priority : Optional[float] |
|
695 | 761 | The priority of the matcher, determines the order of execution of matchers. |
|
696 | 762 | Higher priority means that the matcher will be executed first. Defaults to 0. |
|
697 | 763 | identifier : Optional[str] |
|
698 | 764 | identifier of the matcher allowing users to modify the behaviour via traitlets, |
|
699 | 765 | and also used to for debugging (will be passed as ``origin`` with the completions). |
|
700 | 766 | |
|
701 | 767 | Defaults to matcher function's ``__qualname__`` (for example, |
|
702 | 768 | ``IPCompleter.file_matcher`` for the built-in matched defined |
|
703 | 769 | as a ``file_matcher`` method of the ``IPCompleter`` class). |
|
704 | 770 | api_version: Optional[int] |
|
705 | 771 | version of the Matcher API used by this matcher. |
|
706 | 772 | Currently supported values are 1 and 2. |
|
707 | 773 | Defaults to 1. |
|
708 | 774 | """ |
|
709 | 775 | |
|
710 | 776 | def wrapper(func: Matcher): |
|
711 | func.matcher_priority = priority or 0 | |
|
712 | func.matcher_identifier = identifier or func.__qualname__ | |
|
713 | func.matcher_api_version = api_version | |
|
777 | func.matcher_priority = priority or 0 # type: ignore | |
|
778 | func.matcher_identifier = identifier or func.__qualname__ # type: ignore | |
|
779 | func.matcher_api_version = api_version # type: ignore | |
|
714 | 780 | if TYPE_CHECKING: |
|
715 | 781 | if api_version == 1: |
|
716 | func = cast(func, MatcherAPIv1) |

782 | func = cast(MatcherAPIv1, func) | |
|
717 | 783 | elif api_version == 2: |
|
718 | func = cast(func, MatcherAPIv2) |

784 | func = cast(MatcherAPIv2, func) | |
|
719 | 785 | return func |
|
720 | 786 | |
|
721 | 787 | return wrapper |
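
A sketch of how the decorator is meant to be used for a user-defined API v2 matcher (the identifier and candidate words below are made up for illustration); such a matcher can then be registered by appending it to ``Completer.custom_matchers``.

    @completion_matcher(api_version=2, identifier="my_pkg.color_matcher", priority=0.5)
    def color_matcher(context: CompletionContext) -> SimpleMatcherResult:
        """Offer a few hard-coded words that start with the current token."""
        words = ["red", "green", "blue"]
        return {
            "completions": [
                SimpleCompletion(text=word, type="color")
                for word in words
                if word.startswith(context.token)
            ],
            "suppress": False,
        }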
|
722 | 788 | |
|
723 | 789 | |
|
724 | 790 | def _get_matcher_priority(matcher: Matcher): |
|
725 | 791 | return getattr(matcher, "matcher_priority", 0) |
|
726 | 792 | |
|
727 | 793 | |
|
728 | 794 | def _get_matcher_id(matcher: Matcher): |
|
729 | 795 | return getattr(matcher, "matcher_identifier", matcher.__qualname__) |
|
730 | 796 | |
|
731 | 797 | |
|
732 | 798 | def _get_matcher_api_version(matcher): |
|
733 | 799 | return getattr(matcher, "matcher_api_version", 1) |
|
734 | 800 | |
|
735 | 801 | |
|
736 | 802 | context_matcher = partial(completion_matcher, api_version=2) |
|
737 | 803 | |
|
738 | 804 | |
|
739 | 805 | _IC = Iterable[Completion] |
|
740 | 806 | |
|
741 | 807 | |
|
742 | 808 | def _deduplicate_completions(text: str, completions: _IC)-> _IC: |
|
743 | 809 | """ |
|
744 | 810 | Deduplicate a set of completions. |
|
745 | 811 | |
|
746 | 812 | .. warning:: |
|
747 | 813 | |
|
748 | 814 | Unstable |
|
749 | 815 | |
|
750 | 816 | This function is unstable, API may change without warning. |
|
751 | 817 | |
|
752 | 818 | Parameters |
|
753 | 819 | ---------- |
|
754 | 820 | text : str |
|
755 | 821 | text that should be completed. |
|
756 | 822 | completions : Iterator[Completion] |
|
757 | 823 | iterator over the completions to deduplicate |
|
758 | 824 | |
|
759 | 825 | Yields |
|
760 | 826 | ------ |
|
761 | 827 | `Completions` objects |
|
762 | 828 | Completions coming from multiple sources, may be different but end up having |
|
763 | 829 | the same effect when applied to ``text``. If this is the case, this will |
|
764 | 830 | consider completions as equal and only emit the first encountered. |
|
765 | 831 | Not folded in `completions()` yet for debugging purpose, and to detect when |
|
766 | 832 | the IPython completer does return things that Jedi does not, but should be |
|
767 | 833 | at some point. |
|
768 | 834 | """ |
|
769 | 835 | completions = list(completions) |
|
770 | 836 | if not completions: |
|
771 | 837 | return |
|
772 | 838 | |
|
773 | 839 | new_start = min(c.start for c in completions) |
|
774 | 840 | new_end = max(c.end for c in completions) |
|
775 | 841 | |
|
776 | 842 | seen = set() |
|
777 | 843 | for c in completions: |
|
778 | 844 | new_text = text[new_start:c.start] + c.text + text[c.end:new_end] |
|
779 | 845 | if new_text not in seen: |
|
780 | 846 | yield c |
|
781 | 847 | seen.add(new_text) |
|
782 | 848 | |
|
783 | 849 | |
|
784 | 850 | def rectify_completions(text: str, completions: _IC, *, _debug: bool = False) -> _IC: |
|
785 | 851 | """ |
|
786 | 852 | Rectify a set of completions to all have the same ``start`` and ``end`` |
|
787 | 853 | |
|
788 | 854 | .. warning:: |
|
789 | 855 | |
|
790 | 856 | Unstable |
|
791 | 857 | |
|
792 | 858 | This function is unstable, API may change without warning. |
|
793 | 859 | It will also raise unless used in the proper context manager. |
|
794 | 860 | |
|
795 | 861 | Parameters |
|
796 | 862 | ---------- |
|
797 | 863 | text : str |
|
798 | 864 | text that should be completed. |
|
799 | 865 | completions : Iterator[Completion] |
|
800 | 866 | iterator over the completions to rectify |
|
801 | 867 | _debug : bool |
|
802 | 868 | Log failed completion |
|
803 | 869 | |
|
804 | 870 | Notes |
|
805 | 871 | ----- |
|
806 | 872 | :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though |
|
807 | 873 | the Jupyter Protocol requires them to behave like so. This will readjust |
|
808 | 874 | the completion to have the same ``start`` and ``end`` by padding both |
|
809 | 875 | extremities with surrounding text. |
|
810 | 876 | |
|
811 | 877 | During stabilisation this should support a ``_debug`` option to log which |

812 | 878 | completions are returned by the IPython completer and not found in Jedi, in |

813 | 879 | order to make upstream bug reports. |
|
814 | 880 | """ |
|
815 | 881 | warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). " |
|
816 | 882 | "It may change without warnings. " |
|
817 | 883 | "Use in corresponding context manager.", |
|
818 | 884 | category=ProvisionalCompleterWarning, stacklevel=2) |
|
819 | 885 | |
|
820 | 886 | completions = list(completions) |
|
821 | 887 | if not completions: |
|
822 | 888 | return |
|
823 | 889 | starts = (c.start for c in completions) |
|
824 | 890 | ends = (c.end for c in completions) |
|
825 | 891 | |
|
826 | 892 | new_start = min(starts) |
|
827 | 893 | new_end = max(ends) |
|
828 | 894 | |
|
829 | 895 | seen_jedi = set() |
|
830 | 896 | seen_python_matches = set() |
|
831 | 897 | for c in completions: |
|
832 | 898 | new_text = text[new_start:c.start] + c.text + text[c.end:new_end] |
|
833 | 899 | if c._origin == 'jedi': |
|
834 | 900 | seen_jedi.add(new_text) |
|
835 | 901 | elif c._origin == 'IPCompleter.python_matches': |
|
836 | 902 | seen_python_matches.add(new_text) |
|
837 | 903 | yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature) |
|
838 | 904 | diff = seen_python_matches.difference(seen_jedi) |
|
839 | 905 | if diff and _debug: |
|
840 | 906 | print('IPython.python matches have extras:', diff) |
|
841 | 907 | |
|
842 | 908 | |
|
843 | 909 | if sys.platform == 'win32': |
|
844 | 910 | DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?' |
|
845 | 911 | else: |
|
846 | 912 | DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?' |
|
847 | 913 | |
|
848 | 914 | GREEDY_DELIMS = ' =\r\n' |
|
849 | 915 | |
|
850 | 916 | |
|
851 | 917 | class CompletionSplitter(object): |
|
852 | 918 | """An object to split an input line in a manner similar to readline. |
|
853 | 919 | |
|
854 | 920 | By having our own implementation, we can expose readline-like completion in |
|
855 | 921 | a uniform manner to all frontends. This object only needs to be given the |
|
856 | 922 | line of text to be split and the cursor position on said line, and it |
|
857 | 923 | returns the 'word' to be completed on at the cursor after splitting the |
|
858 | 924 | entire line. |
|
859 | 925 | |
|
860 | 926 | What characters are used as splitting delimiters can be controlled by |
|
861 | 927 | setting the ``delims`` attribute (this is a property that internally |
|
862 | 928 | automatically builds the necessary regular expression)""" |
|
863 | 929 | |
|
864 | 930 | # Private interface |
|
865 | 931 | |
|
866 | 932 | # A string of delimiter characters. The default value makes sense for |
|
867 | 933 | # IPython's most typical usage patterns. |
|
868 | 934 | _delims = DELIMS |
|
869 | 935 | |
|
870 | 936 | # The expression (a normal string) to be compiled into a regular expression |
|
871 | 937 | # for actual splitting. We store it as an attribute mostly for ease of |
|
872 | 938 | # debugging, since this type of code can be so tricky to debug. |
|
873 | 939 | _delim_expr = None |
|
874 | 940 | |
|
875 | 941 | # The regular expression that does the actual splitting |
|
876 | 942 | _delim_re = None |
|
877 | 943 | |
|
878 | 944 | def __init__(self, delims=None): |
|
879 | 945 | delims = CompletionSplitter._delims if delims is None else delims |
|
880 | 946 | self.delims = delims |
|
881 | 947 | |
|
882 | 948 | @property |
|
883 | 949 | def delims(self): |
|
884 | 950 | """Return the string of delimiter characters.""" |
|
885 | 951 | return self._delims |
|
886 | 952 | |
|
887 | 953 | @delims.setter |
|
888 | 954 | def delims(self, delims): |
|
889 | 955 | """Set the delimiters for line splitting.""" |
|
890 | 956 | expr = '[' + ''.join('\\'+ c for c in delims) + ']' |
|
891 | 957 | self._delim_re = re.compile(expr) |
|
892 | 958 | self._delims = delims |
|
893 | 959 | self._delim_expr = expr |
|
894 | 960 | |
|
895 | 961 | def split_line(self, line, cursor_pos=None): |
|
896 | 962 | """Split a line of text with a cursor at the given position. |
|
897 | 963 | """ |
|
898 | 964 | l = line if cursor_pos is None else line[:cursor_pos] |
|
899 | 965 | return self._delim_re.split(l)[-1] |
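
For instance, with the default delimiters the splitter keeps only the trailing fragment after the last delimiter (``.`` is deliberately not a delimiter):

    splitter = CompletionSplitter()
    assert splitter.split_line("print(foo.ba") == "foo.ba"            # '(' splits, '.' does not
    assert splitter.split_line("x = foo.bar", cursor_pos=7) == "foo"  # only text before the cursor counts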
|
900 | 966 | |
|
901 | 967 | |
|
902 | 968 | |
|
903 | 969 | class Completer(Configurable): |
|
904 | 970 | |
|
905 | greedy = Bool(False, |

906 | help="""Activate greedy completion | |
|
907 | PENDING DEPRECATION. this is now mostly taken care of with Jedi. | |
|
971 | greedy = Bool( | |
|
972 | False, | |
|
973 | help="""Activate greedy completion. | |
|
974 | ||
|
975 | .. deprecated:: 8.8 | |
|
976 | Use :std:configtrait:`Completer.evaluation` and :std:configtrait:`Completer.auto_close_dict_keys` instead. | |
|
908 | 977 | |
|
909 | This will enable completion on elements of lists, results of function calls, etc., | |
|
910 | but can be unsafe because the code is actually evaluated on TAB. | |
|
978 | When enabled in IPython 8.8 or newer, changes configuration as follows: | |
|
979 | ||
|
980 | - ``Completer.evaluation = 'unsafe'`` | |
|
981 | - ``Completer.auto_close_dict_keys = True`` | |
|
982 | """, | |
|
983 | ).tag(config=True) | |
|
984 | ||
|
985 | evaluation = Enum( | |
|
986 | ("forbidden", "minimal", "limited", "unsafe", "dangerous"), | |
|
987 | default_value="limited", | |
|
988 | help="""Policy for code evaluation under completion. | |
|
989 | ||
|
990 | Successive options allow to enable more eager evaluation for better | |
|
991 | completion suggestions, including for nested dictionaries, nested lists, | |
|
992 | or even results of function calls. | |
|
993 | Setting ``unsafe`` or higher can lead to evaluation of arbitrary user | |
|
994 | code on :kbd:`Tab` with potentially unwanted or dangerous side effects. | |
|
995 | ||
|
996 | Allowed values are: | |
|
997 | ||
|
998 | - ``forbidden``: no evaluation of code is permitted, | |
|
999 | - ``minimal``: evaluation of literals and access to built-in namespace; | |
|
1000 | no item/attribute evaluation, no access to locals/globals, |
|
1001 | no evaluation of any operations or comparisons. | |
|
1002 | - ``limited``: access to all namespaces, evaluation of hard-coded methods | |
|
1003 | (for example: :any:`dict.keys`, :any:`object.__getattr__`, | |
|
1004 | :any:`object.__getitem__`) on allow-listed objects (for example: | |
|
1005 | :any:`dict`, :any:`list`, :any:`tuple`, ``pandas.Series``), | |
|
1006 | - ``unsafe``: evaluation of all methods and function calls but not of | |
|
1007 | syntax with side-effects like `del x`, | |
|
1008 | - ``dangerous``: completely arbitrary evaluation. | |
|
911 | 1009 | """, |
|
912 | 1010 | ).tag(config=True) |
|
913 | 1011 | |
|
914 | 1012 | use_jedi = Bool(default_value=JEDI_INSTALLED, |
|
915 | 1013 | help="Experimental: Use Jedi to generate autocompletions. " |
|
916 | 1014 | "Default to True if jedi is installed.").tag(config=True) |
|
917 | 1015 | |
|
918 | 1016 | jedi_compute_type_timeout = Int(default_value=400, |
|
919 | 1017 | help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types. |
|
920 | 1018 | Set to 0 to stop computing types. Non-zero value lower than 100ms may hurt |
|
921 | 1019 | performance by preventing jedi to build its cache. |
|
922 | 1020 | """).tag(config=True) |
|
923 | 1021 | |
|
924 | 1022 | debug = Bool(default_value=False, |
|
925 | 1023 | help='Enable debug for the Completer. Mostly print extra ' |
|
926 | 1024 | 'information for experimental jedi integration.')\ |
|
927 | 1025 | .tag(config=True) |
|
928 | 1026 | |
|
929 | 1027 | backslash_combining_completions = Bool(True, |
|
930 | 1028 | help="Enable unicode completions, e.g. \\alpha<tab> . " |
|
931 | 1029 | "Includes completion of latex commands, unicode names, and expanding " |
|
932 | 1030 | "unicode characters back to latex commands.").tag(config=True) |
|
933 | 1031 | |
|
1032 | auto_close_dict_keys = Bool( | |
|
1033 | False, | |
|
1034 | help=""" | |
|
1035 | Enable auto-closing dictionary keys. | |
|
1036 | ||
|
1037 | When enabled string keys will be suffixed with a final quote | |
|
1038 | (matching the opening quote), tuple keys will also receive a | |
|
1039 | separating comma if needed, and keys which are final will | |
|
1040 | receive a closing bracket (``]``). | |
|
1041 | """, | |
|
1042 | ).tag(config=True) | |
|
1043 | ||
|
934 | 1044 | def __init__(self, namespace=None, global_namespace=None, **kwargs): |
|
935 | 1045 | """Create a new completer for the command line. |
|
936 | 1046 | |
|
937 | 1047 | Completer(namespace=ns, global_namespace=ns2) -> completer instance. |
|
938 | 1048 | |
|
939 | 1049 | If unspecified, the default namespace where completions are performed |
|
940 | 1050 | is __main__ (technically, __main__.__dict__). Namespaces should be |
|
941 | 1051 | given as dictionaries. |
|
942 | 1052 | |
|
943 | 1053 | An optional second namespace can be given. This allows the completer |
|
944 | 1054 | to handle cases where both the local and global scopes need to be |
|
945 | 1055 | distinguished. |
|
946 | 1056 | """ |
|
947 | 1057 | |
|
948 | 1058 | # Don't bind to namespace quite yet, but flag whether the user wants a |
|
949 | 1059 | # specific namespace or to use __main__.__dict__. This will allow us |
|
950 | 1060 | # to bind to __main__.__dict__ at completion time, not now. |
|
951 | 1061 | if namespace is None: |
|
952 | 1062 | self.use_main_ns = True |
|
953 | 1063 | else: |
|
954 | 1064 | self.use_main_ns = False |
|
955 | 1065 | self.namespace = namespace |
|
956 | 1066 | |
|
957 | 1067 | # The global namespace, if given, can be bound directly |
|
958 | 1068 | if global_namespace is None: |
|
959 | 1069 | self.global_namespace = {} |
|
960 | 1070 | else: |
|
961 | 1071 | self.global_namespace = global_namespace |
|
962 | 1072 | |
|
963 | 1073 | self.custom_matchers = [] |
|
964 | 1074 | |
|
965 | 1075 | super(Completer, self).__init__(**kwargs) |
|
966 | 1076 | |
|
967 | 1077 | def complete(self, text, state): |
|
968 | 1078 | """Return the next possible completion for 'text'. |
|
969 | 1079 | |
|
970 | 1080 | This is called successively with state == 0, 1, 2, ... until it |
|
971 | 1081 | returns None. The completion should begin with 'text'. |
|
972 | 1082 | |
|
973 | 1083 | """ |
|
974 | 1084 | if self.use_main_ns: |
|
975 | 1085 | self.namespace = __main__.__dict__ |
|
976 | 1086 | |
|
977 | 1087 | if state == 0: |
|
978 | 1088 | if "." in text: |
|
979 | 1089 | self.matches = self.attr_matches(text) |
|
980 | 1090 | else: |
|
981 | 1091 | self.matches = self.global_matches(text) |
|
982 | 1092 | try: |
|
983 | 1093 | return self.matches[state] |
|
984 | 1094 | except IndexError: |
|
985 | 1095 | return None |
|
986 | 1096 | |
|
987 | 1097 | def global_matches(self, text): |
|
988 | 1098 | """Compute matches when text is a simple name. |
|
989 | 1099 | |
|
990 | 1100 | Return a list of all keywords, built-in functions and names currently |
|
991 | 1101 | defined in self.namespace or self.global_namespace that match. |
|
992 | 1102 | |
|
993 | 1103 | """ |
|
994 | 1104 | matches = [] |
|
995 | 1105 | match_append = matches.append |
|
996 | 1106 | n = len(text) |
|
997 | 1107 | for lst in [ |
|
998 | 1108 | keyword.kwlist, |
|
999 | 1109 | builtin_mod.__dict__.keys(), |
|
1000 | 1110 | list(self.namespace.keys()), |
|
1001 | 1111 | list(self.global_namespace.keys()), |
|
1002 | 1112 | ]: |
|
1003 | 1113 | for word in lst: |
|
1004 | 1114 | if word[:n] == text and word != "__builtins__": |
|
1005 | 1115 | match_append(word) |
|
1006 | 1116 | |
|
1007 | 1117 | snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z") |
|
1008 | 1118 | for lst in [list(self.namespace.keys()), list(self.global_namespace.keys())]: |
|
1009 | 1119 | shortened = { |
|
1010 | 1120 | "_".join([sub[0] for sub in word.split("_")]): word |
|
1011 | 1121 | for word in lst |
|
1012 | 1122 | if snake_case_re.match(word) |
|
1013 | 1123 | } |
|
1014 | 1124 | for word in shortened.keys(): |
|
1015 | 1125 | if word[:n] == text and word != "__builtins__": |
|
1016 | 1126 | match_append(shortened[word]) |
|
1017 | 1127 | return matches |
|
1018 | 1128 | |
|
1019 | 1129 | def attr_matches(self, text): |
|
1020 | 1130 | """Compute matches when text contains a dot. |
|
1021 | 1131 | |
|
1022 | 1132 | Assuming the text is of the form NAME.NAME....[NAME], and is |
|
1023 | 1133 | evaluatable in self.namespace or self.global_namespace, it will be |
|
1024 | 1134 | evaluated and its attributes (as revealed by dir()) are used as |
|
1025 | 1135 | possible completions. (For class instances, class members are |
|
1026 | 1136 | also considered.) |
|
1027 | 1137 | |
|
1028 | 1138 | WARNING: this can still invoke arbitrary C code, if an object |
|
1029 | 1139 | with a __getattr__ hook is evaluated. |
|
1030 | 1140 | |
|
1031 | 1141 | """ |
|
1032 | ||
|
1033 | # Another option, seems to work great. Catches things like ''.<tab> | |
|
1034 | m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text) | |
|
1035 | ||
|
1036 | if m: | |
|
1037 | expr, attr = m.group(1, 3) | |
|
1038 | elif self.greedy: | |
|
1039 | 1142 | m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer) |

1040 | 1143 | if not m2: |

1041 | 1144 | return [] |

1042 | 1145 | expr, attr = m2.group(1, 2) |

1043 | else: | |
|
1044 | return [] | |
|
1045 | 1146 | |
|
1046 | try: | |
|
1047 | obj = eval(expr, self.namespace) | |
|
1048 | except: | |
|
1049 | try: | |
|
1050 | obj = eval(expr, self.global_namespace) | |
|
1051 | except: | |
|
1147 | obj = self._evaluate_expr(expr) | |
|
1148 | ||
|
1149 | if obj is not_found: | |
|
1052 | 1150 | return [] |

1053 | 1151 | |
|
1054 | 1152 | if self.limit_to__all__ and hasattr(obj, '__all__'): |
|
1055 | 1153 | words = get__all__entries(obj) |
|
1056 | 1154 | else: |
|
1057 | 1155 | words = dir2(obj) |
|
1058 | 1156 | |
|
1059 | 1157 | try: |
|
1060 | 1158 | words = generics.complete_object(obj, words) |
|
1061 | 1159 | except TryNext: |
|
1062 | 1160 | pass |
|
1063 | 1161 | except AssertionError: |
|
1064 | 1162 | raise |
|
1065 | 1163 | except Exception: |
|
1066 | 1164 | # Silence errors from completion function |
|
1067 | 1165 | #raise # dbg |
|
1068 | 1166 | pass |
|
1069 | 1167 | # Build match list to return |
|
1070 | 1168 | n = len(attr) |
|
1071 | return ["%s.%s" % (expr, w) for w in words if w[:n] == attr ] |

1169 | return ["%s.%s" % (expr, w) for w in words if w[:n] == attr] | |
|
1072 | 1170 | |
|
1171 | def _evaluate_expr(self, expr): | |
|
1172 | obj = not_found | |
|
1173 | done = False | |
|
1174 | while not done and expr: | |
|
1175 | try: | |
|
1176 | obj = guarded_eval( | |
|
1177 | expr, | |
|
1178 | EvaluationContext( | |
|
1179 | globals=self.global_namespace, | |
|
1180 | locals=self.namespace, | |
|
1181 | evaluation=self.evaluation, | |
|
1182 | ), | |
|
1183 | ) | |
|
1184 | done = True | |
|
1185 | except Exception as e: | |
|
1186 | if self.debug: | |
|
1187 | print("Evaluation exception", e) | |
|
1188 | # trim the expression to remove any invalid prefix | |
|
1189 | # e.g. user starts `(d[`, so we get `expr = '(d'`, | |
|
1190 | # where parenthesis is not closed. | |
|
1191 | # TODO: make this faster by reusing parts of the computation? | |
|
1192 | expr = expr[1:] | |
|
1193 | return obj | |
|
1073 | 1194 | |
|
1074 | 1195 | def get__all__entries(obj): |
|
1075 | 1196 | """returns the strings in the __all__ attribute""" |
|
1076 | 1197 | try: |
|
1077 | 1198 | words = getattr(obj, '__all__') |
|
1078 | 1199 | except: |
|
1079 | 1200 | return [] |
|
1080 | 1201 | |
|
1081 | 1202 | return [w for w in words if isinstance(w, str)] |
|
1082 | 1203 | |
|
1083 | 1204 | |
|
1084 | def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes]]]], prefix: str, delims: str, | |
|
1085 | extra_prefix: Optional[Tuple[str, bytes]]=None) -> Tuple[str, int, List[str]]: | |
|
1205 | class _DictKeyState(enum.Flag): | |
|
1206 | """Represent state of the key match in context of other possible matches. | |
|
1207 | ||
|
1208 | - given `d1 = {'a': 1}` completion on `d1['<tab>` will yield `{'a': END_OF_ITEM}` as there is no tuple. | |
|
1209 | - given `d2 = {('a', 'b'): 1}`: `d2['a', '<tab>` will yield `{'b': END_OF_TUPLE}` as there is no tuple members to add beyond `'b'`. | |
|
1210 | - given `d3 = {('a', 'b'): 1}`: `d3['<tab>` will yield `{'a': IN_TUPLE}` as `'a'` can be added. | |
|
1211 | - given `d4 = {'a': 1, ('a', 'b'): 2}`: `d4['<tab>` will yield `{'a': END_OF_ITEM | END_OF_TUPLE}` (both flags set) |
|
1212 | """ | |
|
1213 | ||
|
1214 | BASELINE = 0 | |
|
1215 | END_OF_ITEM = enum.auto() | |
|
1216 | END_OF_TUPLE = enum.auto() | |
|
1217 | IN_TUPLE = enum.auto() | |
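
Because this is an ``enum.Flag``, the states collected from several matching keys can be OR-ed together and tested independently, e.g.:

    state = _DictKeyState.END_OF_ITEM | _DictKeyState.END_OF_TUPLE
    assert _DictKeyState.END_OF_ITEM in state   # the key also exists as a plain item
    assert _DictKeyState.IN_TUPLE not in state  # nothing further to add inside a tuple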
|
1218 | ||
|
1219 | ||
|
1220 | def _parse_tokens(c): | |
|
1221 | """Parse tokens even if there is an error.""" | |
|
1222 | tokens = [] | |
|
1223 | token_generator = tokenize.generate_tokens(iter(c.splitlines()).__next__) | |
|
1224 | while True: | |
|
1225 | try: | |
|
1226 | tokens.append(next(token_generator)) | |
|
1227 | except tokenize.TokenError: | |
|
1228 | return tokens | |
|
1229 | except StopIteration: | |
|
1230 | return tokens | |
|
1231 | ||
|
1232 | ||
|
1233 | def _match_number_in_dict_key_prefix(prefix: str) -> Union[str, None]: | |
|
1234 | """Match any valid Python numeric literal in a prefix of dictionary keys. | |
|
1235 | ||
|
1236 | References: | |
|
1237 | - https://docs.python.org/3/reference/lexical_analysis.html#numeric-literals | |
|
1238 | - https://docs.python.org/3/library/tokenize.html | |
|
1239 | """ | |
|
1240 | if prefix[-1].isspace(): | |
|
1241 | # if user typed a space we do not have anything to complete | |
|
1242 | # even if there was a valid number token before | |
|
1243 | return None | |
|
1244 | tokens = _parse_tokens(prefix) | |
|
1245 | rev_tokens = reversed(tokens) | |
|
1246 | skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE} | |
|
1247 | number = None | |
|
1248 | for token in rev_tokens: | |
|
1249 | if token.type in skip_over: | |
|
1250 | continue | |
|
1251 | if number is None: | |
|
1252 | if token.type == tokenize.NUMBER: | |
|
1253 | number = token.string | |
|
1254 | continue | |
|
1255 | else: | |
|
1256 | # we did not match a number | |
|
1257 | return None | |
|
1258 | if token.type == tokenize.OP: | |
|
1259 | if token.string == ",": | |
|
1260 | break | |
|
1261 | if token.string in {"+", "-"}: | |
|
1262 | number = token.string + number | |
|
1263 | else: | |
|
1264 | return None | |
|
1265 | return number | |
|
1266 | ||
|
1267 | ||
|
1268 | _INT_FORMATS = { | |
|
1269 | "0b": bin, | |
|
1270 | "0o": oct, | |
|
1271 | "0x": hex, | |
|
1272 | } | |
|
1273 | ||
|
1274 | ||
|
1275 | def match_dict_keys( | |
|
1276 | keys: List[Union[str, bytes, Tuple[Union[str, bytes], ...]]], | |
|
1277 | prefix: str, | |
|
1278 | delims: str, | |
|
1279 | extra_prefix: Optional[Tuple[Union[str, bytes], ...]] = None, | |
|
1280 | ) -> Tuple[str, int, Dict[str, _DictKeyState]]: | |
|
1086 | 1281 | """Used by dict_key_matches, matching the prefix to a list of keys |
|
1087 | 1282 | |
|
1088 | 1283 | Parameters |
|
1089 | 1284 | ---------- |
|
1090 | 1285 | keys |
|
1091 | 1286 | list of keys in dictionary currently being completed. |
|
1092 | 1287 | prefix |
|
1093 | 1288 | Part of the text already typed by the user. E.g. `mydict[b'fo` |
|
1094 | 1289 | delims |
|
1095 | 1290 | String of delimiters to consider when finding the current key. |
|
1096 | 1291 | extra_prefix : optional |
|
1097 | 1292 | Part of the text already typed in multi-key index cases. E.g. for |
|
1098 | 1293 | `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`. |
|
1099 | 1294 | |
|
1100 | 1295 | Returns |
|
1101 | 1296 | ------- |
|
1102 | 1297 | A tuple of three elements: ``quote``, ``token_start``, ``matched``, with |
|
1103 | 1298 | ``quote`` being the quote that need to be used to close current string. |
|
1104 | 1299 | ``token_start`` the position where the replacement should start occurring, |
|
1105 | ``matches`` a list of replacement/completion |

1106 | ||
|
1300 | ``matches`` a dictionary with the replacement/completion strings as keys and |

1301 | values indicating the state of each key match. |
|
1107 | 1302 | """ |
|
1108 | 1303 | prefix_tuple = extra_prefix if extra_prefix else () |
|
1109 | Nprefix = len(prefix_tuple) | |
|
1304 | ||
|
1305 | prefix_tuple_size = sum( | |
|
1306 | [ | |
|
1307 | # for pandas, do not count slices as taking space | |
|
1308 | not isinstance(k, slice) | |
|
1309 | for k in prefix_tuple | |
|
1310 | ] | |
|
1311 | ) | |
|
1312 | text_serializable_types = (str, bytes, int, float, slice) | |
|
1313 | ||
|
1110 | 1314 | def filter_prefix_tuple(key): |
|
1111 | 1315 | # Reject too short keys |
|
1112 | if len(key) <= Nprefix: |

1316 | if len(key) <= prefix_tuple_size: | |
|
1113 | 1317 | return False |
|
1114 | # Reject keys with non str/bytes in it |

1318 | # Reject keys which cannot be serialised to text | |
|
1115 | 1319 | for k in key: |
|
1116 | if not isinstance(k, (str, bytes)): |

1320 | if not isinstance(k, text_serializable_types): | |
|
1117 | 1321 | return False |
|
1118 | 1322 | # Reject keys that do not match the prefix |
|
1119 | 1323 | for k, pt in zip(key, prefix_tuple): |
|
1120 | if k != pt: | |
|
1324 | if k != pt and not isinstance(pt, slice): | |
|
1121 | 1325 | return False |
|
1122 | 1326 | # All checks passed! |
|
1123 | 1327 | return True |
|
1124 | 1328 | |
|
1125 | filtered_keys:List[Union[str,bytes]] = [] | |
|
1126 | def _add_to_filtered_keys(key): | |
|
1127 | if isinstance(key, (str, bytes)): | |
|
1128 | filtered_keys.append(key) | |
|
1329 | filtered_key_is_final: Dict[ | |
|
1330 | Union[str, bytes, int, float], _DictKeyState | |
|
1331 | ] = defaultdict(lambda: _DictKeyState.BASELINE) | |
|
1129 | 1332 | |
|
1130 | 1333 | for k in keys: |
|
1334 | # If at least one of the matches is not final, mark as undetermined. | |
|
1335 | # This can happen with `d = {111: 'b', (111, 222): 'a'}` where | |
|
1336 | # `111` appears final on first match but is not final on the second. | |
|
1337 | ||
|
1131 | 1338 | if isinstance(k, tuple): |
|
1132 | 1339 | if filter_prefix_tuple(k): |
|
1133 | _add_to_filtered_keys(k[Nprefix]) | |
|
1340 | key_fragment = k[prefix_tuple_size] | |
|
1341 | filtered_key_is_final[key_fragment] |= ( | |
|
1342 | _DictKeyState.END_OF_TUPLE | |
|
1343 | if len(k) == prefix_tuple_size + 1 | |
|
1344 | else _DictKeyState.IN_TUPLE | |
|
1345 | ) | |
|
1346 | elif prefix_tuple_size > 0: | |
|
1347 | # we are completing a tuple but this key is not a tuple, | |
|
1348 | # so we should ignore it | |
|
1349 | pass | |
|
1134 | 1350 | else: |
|
1135 | _add_to_filtered_keys(k) | |
|
1351 | if isinstance(k, text_serializable_types): | |
|
1352 | filtered_key_is_final[k] |= _DictKeyState.END_OF_ITEM | |
|
1353 | ||
|
1354 | filtered_keys = filtered_key_is_final.keys() | |
|
1136 | 1355 | |
|
1137 | 1356 | if not prefix: |
|
1138 | return '', 0, [repr(k) for k in filtered_keys if isinstance(k, (str, bytes))] |

1139 | quote_match = re.search('["\']', prefix) | |
|
1140 | assert quote_match is not None # silence mypy | |
|
1357 | return "", 0, {repr(k): v for k, v in filtered_key_is_final.items()} | |
|
1358 | ||
|
1359 | quote_match = re.search("(?:\"|')", prefix) | |
|
1360 | is_user_prefix_numeric = False | |
|
1361 | ||
|
1362 | if quote_match: | |
|
1141 | 1363 | quote = quote_match.group() |
|
1364 | valid_prefix = prefix + quote | |
|
1142 | 1365 | try: |
|
1143 | prefix_str = eval(prefix + quote, {}) |

1366 | prefix_str = literal_eval(valid_prefix) | |
|
1144 | 1367 | except Exception: |
|
1145 | return '', 0, [] |

1368 | return "", 0, {} | |
|
1369 | else: | |
|
1370 | # If it does not look like a string, let's assume | |
|
1371 | # we are dealing with a number or variable. | |
|
1372 | number_match = _match_number_in_dict_key_prefix(prefix) | |
|
1373 | ||
|
1374 | # We do not want the key matcher to suggest variable names so we yield: | |
|
1375 | if number_match is None: | |
|
1376 | # The alternative would be to assume that the user forgot the quote |
|
1377 | # and if the substring matches, suggest adding it at the start. | |
|
1378 | return "", 0, {} | |
|
1379 | ||
|
1380 | prefix_str = number_match | |
|
1381 | is_user_prefix_numeric = True | |
|
1382 | quote = "" | |
|
1146 | 1383 | |
|
1147 | 1384 | pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$' |
|
1148 | 1385 | token_match = re.search(pattern, prefix, re.UNICODE) |
|
1149 | 1386 | assert token_match is not None # silence mypy |
|
1150 | 1387 | token_start = token_match.start() |
|
1151 | 1388 | token_prefix = token_match.group() |
|
1152 | 1389 | |
|
1153 | matched: List[str] = [] |

1390 | matched: Dict[str, _DictKeyState] = {} | |
|
1391 | ||
|
1392 | str_key: Union[str, bytes] | |
|
1393 | ||
|
1154 | 1394 | for key in filtered_keys: |
|
1395 | if isinstance(key, (int, float)): | |
|
1396 | # User typed a number but this key is not a number. | |
|
1397 | if not is_user_prefix_numeric: | |
|
1398 | continue | |
|
1399 | str_key = str(key) | |
|
1400 | if isinstance(key, int): | |
|
1401 | int_base = prefix_str[:2].lower() | |
|
1402 | # if user typed integer using binary/oct/hex notation: | |
|
1403 | if int_base in _INT_FORMATS: | |
|
1404 | int_format = _INT_FORMATS[int_base] | |
|
1405 | str_key = int_format(key) | |
|
1406 | else: | |
|
1407 | # User typed a string but this key is a number. | |
|
1408 | if is_user_prefix_numeric: | |
|
1409 | continue | |
|
1410 | str_key = key | |
|
1155 | 1411 | try: |
|
1156 | if not key.startswith(prefix_str): | |
|
1412 | if not str_key.startswith(prefix_str): | |
|
1157 | 1413 | continue |
|
1158 | except (AttributeError, TypeError, UnicodeError): | |
|
1414 | except (AttributeError, TypeError, UnicodeError) as e: | |
|
1159 | 1415 | # Python 3+ TypeError on b'a'.startswith('a') or vice-versa |
|
1160 | 1416 | continue |
|
1161 | 1417 | |
|
1162 | 1418 | # reformat remainder of key to begin with prefix |
|
1163 | rem = key[len(prefix_str):] | |
|
1419 | rem = str_key[len(prefix_str) :] | |
|
1164 | 1420 | # force repr wrapped in ' |
|
1165 | 1421 | rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"') |
|
1166 | 1422 | rem_repr = rem_repr[1 + rem_repr.index("'"):-2] |
|
1167 | 1423 | if quote == '"': |
|
1168 | 1424 | # The entered prefix is quoted with ", |
|
1169 | 1425 | # but the match is quoted with '. |
|
1170 | 1426 | # A contained " hence needs escaping for comparison: |
|
1171 | 1427 | rem_repr = rem_repr.replace('"', '\\"') |
|
1172 | 1428 | |
|
1173 | 1429 | # then reinsert prefix from start of token |
|
1174 | matched.append('%s%s' % (token_prefix, rem_repr)) |

1430 | match = "%s%s" % (token_prefix, rem_repr) | |
|
1431 | ||
|
1432 | matched[match] = filtered_key_is_final[key] | |
|
1175 | 1433 | return quote, token_start, matched |
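
A small usage sketch based on the code above (string keys, a single-quote prefix and the module-level ``DELIMS``); the dictionary values are the ``_DictKeyState`` flags the function computes:

    quote, token_start, matched = match_dict_keys(["foo", "bar"], "'f", DELIMS)
    # quote == "'"       -> close the string with the same quote the user opened
    # token_start == 1   -> replacement starts right after the opening quote
    # matched == {"foo": _DictKeyState.END_OF_ITEM}  -> only keys matching the prefix remain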
|
1176 | 1434 | |
|
1177 | 1435 | |
|
1178 | 1436 | def cursor_to_position(text:str, line:int, column:int)->int: |
|
1179 | 1437 | """ |
|
1180 | 1438 | Convert the (line,column) position of the cursor in text to an offset in a |
|
1181 | 1439 | string. |
|
1182 | 1440 | |
|
1183 | 1441 | Parameters |
|
1184 | 1442 | ---------- |
|
1185 | 1443 | text : str |
|
1186 | 1444 | The text in which to calculate the cursor offset |
|
1187 | 1445 | line : int |
|
1188 | 1446 | Line of the cursor; 0-indexed |
|
1189 | 1447 | column : int |
|
1190 | 1448 | Column of the cursor 0-indexed |
|
1191 | 1449 | |
|
1192 | 1450 | Returns |
|
1193 | 1451 | ------- |
|
1194 | 1452 | Position of the cursor in ``text``, 0-indexed. |
|
1195 | 1453 | |
|
1196 | 1454 | See Also |
|
1197 | 1455 | -------- |
|
1198 | 1456 | position_to_cursor : reciprocal of this function |
|
1199 | 1457 | |
|
1200 | 1458 | """ |
|
1201 | 1459 | lines = text.split('\n') |
|
1202 | 1460 | assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines))) |
|
1203 | 1461 | |
|
1204 | 1462 | return sum(len(l) + 1 for l in lines[:line]) + column |
|
1205 | 1463 | |
|
1206 | 1464 | def position_to_cursor(text:str, offset:int)->Tuple[int, int]: |
|
1207 | 1465 | """ |
|
1208 | 1466 | Convert the position of the cursor in text (0 indexed) to a line |
|
1209 | 1467 | number(0-indexed) and a column number (0-indexed) pair |
|
1210 | 1468 | |
|
1211 | 1469 | Position should be a valid position in ``text``. |
|
1212 | 1470 | |
|
1213 | 1471 | Parameters |
|
1214 | 1472 | ---------- |
|
1215 | 1473 | text : str |
|
1216 | 1474 | The text in which to calculate the cursor offset |
|
1217 | 1475 | offset : int |
|
1218 | 1476 | Position of the cursor in ``text``, 0-indexed. |
|
1219 | 1477 | |
|
1220 | 1478 | Returns |
|
1221 | 1479 | ------- |
|
1222 | 1480 | (line, column) : (int, int) |
|
1223 | 1481 | Line of the cursor; 0-indexed, column of the cursor 0-indexed |
|
1224 | 1482 | |
|
1225 | 1483 | See Also |
|
1226 | 1484 | -------- |
|
1227 | 1485 | cursor_to_position : reciprocal of this function |
|
1228 | 1486 | |
|
1229 | 1487 | """ |
|
1230 | 1488 | |
|
1231 | 1489 | assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text)) |
|
1232 | 1490 | |
|
1233 | 1491 | before = text[:offset] |
|
1234 | 1492 | blines = before.split('\n')  # ! splitlines trims the trailing \n |
|
1235 | 1493 | line = before.count('\n') |
|
1236 | 1494 | col = len(blines[-1]) |
|
1237 | 1495 | return line, col |
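
The two helpers are inverses of each other; a quick round-trip sketch:

    text = "ab\ncd"
    offset = cursor_to_position(text, line=1, column=1)  # points at the "d"
    assert offset == 4
    assert position_to_cursor(text, offset) == (1, 1)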
|
1238 | 1496 | |
|
1239 | 1497 | |
|
1240 | def _safe_isinstance(obj, module, class_name): | |
|
1498 | def _safe_isinstance(obj, module, class_name, *attrs): | |
|
1241 | 1499 | """Checks if obj is an instance of module.class_name if loaded |
|
1242 | 1500 | """ |
|
1243 | return (module in sys.modules and |

1244 | isinstance(obj, getattr(import_module(module), class_name))) | |
|
1501 | if module in sys.modules: | |
|
1502 | m = sys.modules[module] | |
|
1503 | for attr in [class_name, *attrs]: | |
|
1504 | m = getattr(m, attr) | |
|
1505 | return isinstance(obj, m) | |
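
The helper deliberately avoids importing anything: if the module is not already in ``sys.modules`` the check simply comes back falsy. A sketch:

    import collections.abc  # ensure "collections" is in sys.modules for the example

    assert _safe_isinstance([], "collections", "abc", "Sequence")
    assert not _safe_isinstance([], "not_imported_module", "SomeClass")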
|
1245 | 1506 | |
|
1246 | 1507 | |
|
1247 | 1508 | @context_matcher() |
|
1248 | 1509 | def back_unicode_name_matcher(context: CompletionContext): |
|
1249 | 1510 | """Match Unicode characters back to Unicode name |
|
1250 | 1511 | |
|
1251 | 1512 | Same as :any:`back_unicode_name_matches`, but adopted to new Matcher API. |
|
1252 | 1513 | """ |
|
1253 | 1514 | fragment, matches = back_unicode_name_matches(context.text_until_cursor) |
|
1254 | 1515 | return _convert_matcher_v1_result_to_v2( |
|
1255 | 1516 | matches, type="unicode", fragment=fragment, suppress_if_matches=True |
|
1256 | 1517 | ) |
|
1257 | 1518 | |
|
1258 | 1519 | |
|
1259 | 1520 | def back_unicode_name_matches(text: str) -> Tuple[str, Sequence[str]]: |
|
1260 | 1521 | """Match Unicode characters back to Unicode name |
|
1261 | 1522 | |
|
1262 | 1523 | This does ``☃`` -> ``\\snowman`` |
|
1263 | 1524 | |
|
1264 | 1525 | Note that snowman is not a valid python3 combining character but will be expanded. |
|
1265 | 1526 | Though it will not recombine back to the snowman character by the completion machinery. |
|
1266 | 1527 | |
|
1267 | 1528 | This will not either back-complete standard sequences like \\n, \\b ... |
|
1268 | 1529 | |
|
1269 | 1530 | .. deprecated:: 8.6 |
|
1270 | 1531 | You can use :meth:`back_unicode_name_matcher` instead. |
|
1271 | 1532 | |
|
1272 | 1533 | Returns |
|
1273 | 1534 | ======= |
|
1274 | 1535 | |
|
1275 | 1536 | Return a tuple with two elements: |
|
1276 | 1537 | |
|
1277 | 1538 | - The Unicode character that was matched (preceded with a backslash), or |
|
1278 | 1539 | empty string, |
|
1279 | 1540 | - a sequence (of 1), name for the match Unicode character, preceded by |
|
1280 | 1541 | backslash, or empty if no match. |
|
1281 | 1542 | """ |
|
1282 | 1543 | if len(text)<2: |
|
1283 | 1544 | return '', () |
|
1284 | 1545 | maybe_slash = text[-2] |
|
1285 | 1546 | if maybe_slash != '\\': |
|
1286 | 1547 | return '', () |
|
1287 | 1548 | |
|
1288 | 1549 | char = text[-1] |
|
1289 | 1550 | # no expand on quote for completion in strings. |
|
1290 | 1551 | # nor backcomplete standard ascii keys |
|
1291 | 1552 | if char in string.ascii_letters or char in ('"',"'"): |
|
1292 | 1553 | return '', () |
|
1293 | 1554 | try : |
|
1294 | 1555 | unic = unicodedata.name(char) |
|
1295 | 1556 | return '\\'+char,('\\'+unic,) |
|
1296 | 1557 | except KeyError: |
|
1297 | 1558 | pass |
|
1298 | 1559 | return '', () |
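
Following the snowman example from the docstring, a quick sketch of the return value:

    fragment, names = back_unicode_name_matches("\\☃")
    assert fragment == "\\☃"
    assert names == ("\\SNOWMAN",)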
|
1299 | 1560 | |
|
1300 | 1561 | |
|
1301 | 1562 | @context_matcher() |
|
1302 | 1563 | def back_latex_name_matcher(context: CompletionContext): |
|
1303 | 1564 | """Match latex characters back to unicode name |
|
1304 | 1565 | |
|
1305 | 1566 | Same as :any:`back_latex_name_matches`, but adopted to new Matcher API. |
|
1306 | 1567 | """ |
|
1307 | 1568 | fragment, matches = back_latex_name_matches(context.text_until_cursor) |
|
1308 | 1569 | return _convert_matcher_v1_result_to_v2( |
|
1309 | 1570 | matches, type="latex", fragment=fragment, suppress_if_matches=True |
|
1310 | 1571 | ) |
|
1311 | 1572 | |
|
1312 | 1573 | |
|
1313 | 1574 | def back_latex_name_matches(text: str) -> Tuple[str, Sequence[str]]: |
|
1314 | 1575 | """Match latex characters back to unicode name |
|
1315 | 1576 | |
|
1316 | 1577 | This does ``\\ℵ`` -> ``\\aleph`` |
|
1317 | 1578 | |
|
1318 | 1579 | .. deprecated:: 8.6 |
|
1319 | 1580 | You can use :meth:`back_latex_name_matcher` instead. |
|
1320 | 1581 | """ |
|
1321 | 1582 | if len(text)<2: |
|
1322 | 1583 | return '', () |
|
1323 | 1584 | maybe_slash = text[-2] |
|
1324 | 1585 | if maybe_slash != '\\': |
|
1325 | 1586 | return '', () |
|
1326 | 1587 | |
|
1327 | 1588 | |
|
1328 | 1589 | char = text[-1] |
|
1329 | 1590 | # no expand on quote for completion in strings. |
|
1330 | 1591 | # nor backcomplete standard ascii keys |
|
1331 | 1592 | if char in string.ascii_letters or char in ('"',"'"): |
|
1332 | 1593 | return '', () |
|
1333 | 1594 | try : |
|
1334 | 1595 | latex = reverse_latex_symbol[char] |
|
1335 | 1596 | # '\\' replace the \ as well |
|
1336 | 1597 | return '\\'+char,[latex] |
|
1337 | 1598 | except KeyError: |
|
1338 | 1599 | pass |
|
1339 | 1600 | return '', () |
|
1340 | 1601 | |
|
1341 | 1602 | |
|
1342 | 1603 | def _formatparamchildren(parameter) -> str: |
|
1343 | 1604 | """ |
|
1344 | 1605 | Get parameter name and value from Jedi Private API |
|
1345 | 1606 | |
|
1346 | 1607 | Jedi does not expose a simple way to get `param=value` from its API. |
|
1347 | 1608 | |
|
1348 | 1609 | Parameters |
|
1349 | 1610 | ---------- |
|
1350 | 1611 | parameter |
|
1351 | 1612 | Jedi's function `Param` |
|
1352 | 1613 | |
|
1353 | 1614 | Returns |
|
1354 | 1615 | ------- |
|
1355 | 1616 | A string like 'a', 'b=1', '*args', '**kwargs' |
|
1356 | 1617 | |
|
1357 | 1618 | """ |
|
1358 | 1619 | description = parameter.description |
|
1359 | 1620 | if not description.startswith('param '): |
|
1360 | 1621 | raise ValueError('Jedi function parameter description has changed format. ' |
|
1361 | 1622 | 'Expected "param ...", found %r".' % description) |
|
1362 | 1623 | return description[6:] |
|
1363 | 1624 | |
|
1364 | 1625 | def _make_signature(completion)-> str: |
|
1365 | 1626 | """ |
|
1366 | 1627 | Make the signature from a jedi completion |
|
1367 | 1628 | |
|
1368 | 1629 | Parameters |
|
1369 | 1630 | ---------- |
|
1370 | 1631 | completion : jedi.Completion |
|
1371 | 1632 | object does not complete a function type |
|
1372 | 1633 | |
|
1373 | 1634 | Returns |
|
1374 | 1635 | ------- |
|
1375 | 1636 | a string consisting of the function signature, with the parenthesis but |
|
1376 | 1637 | without the function name. example: |
|
1377 | 1638 | `(a, *args, b=1, **kwargs)` |
|
1378 | 1639 | |
|
1379 | 1640 | """ |
|
1380 | 1641 | |
|
1381 | 1642 | # it looks like this might work on jedi 0.17 |
|
1382 | 1643 | if hasattr(completion, 'get_signatures'): |
|
1383 | 1644 | signatures = completion.get_signatures() |
|
1384 | 1645 | if not signatures: |
|
1385 | 1646 | return '(?)' |
|
1386 | 1647 | |
|
1387 | 1648 | c0 = completion.get_signatures()[0] |
|
1388 | 1649 | return '('+c0.to_string().split('(', maxsplit=1)[1] |
|
1389 | 1650 | |
|
1390 | 1651 | return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures() |
|
1391 | 1652 | for p in signature.defined_names()) if f]) |
|
1392 | 1653 | |
|
1393 | 1654 | |
|
1394 | 1655 | _CompleteResult = Dict[str, MatcherResult] |
|
1395 | 1656 | |
|
1396 | 1657 | |
|
1658 | DICT_MATCHER_REGEX = re.compile( | |
|
1659 | r"""(?x) | |
|
1660 | ( # match dict-referring - or any get item object - expression | |
|
1661 | .+ | |
|
1662 | ) | |
|
1663 | \[ # open bracket | |
|
1664 | \s* # and optional whitespace | |
|
1665 | # Capture any number of serializable objects (e.g. "a", "b", 'c') | |
|
1666 | # and slices | |
|
1667 | ((?:(?: | |
|
1668 | (?: # closed string | |
|
1669 | [uUbB]? # string prefix (r not handled) | |
|
1670 | (?: | |
|
1671 | '(?:[^']|(?<!\\)\\')*' | |
|
1672 | | | |
|
1673 | "(?:[^"]|(?<!\\)\\")*" | |
|
1674 | ) | |
|
1675 | ) | |
|
1676 | | | |
|
1677 | # capture integers and slices | |
|
1678 | (?:[-+]?\d+)?(?::(?:[-+]?\d+)?){0,2} | |
|
1679 | | | |
|
1680 | # integer in bin/hex/oct notation | |
|
1681 | 0[bBxXoO]_?(?:\w|\d)+ | |
|
1682 | ) | |
|
1683 | \s*,\s* | |
|
1684 | )*) | |
|
1685 | ((?: | |
|
1686 | (?: # unclosed string | |
|
1687 | [uUbB]? # string prefix (r not handled) | |
|
1688 | (?: | |
|
1689 | '(?:[^']|(?<!\\)\\')* | |
|
1690 | | | |
|
1691 | "(?:[^"]|(?<!\\)\\")* | |
|
1692 | ) | |
|
1693 | ) | |
|
1694 | | | |
|
1695 | # unfinished integer | |
|
1696 | (?:[-+]?\d+) | |
|
1697 | | | |
|
1698 | # integer in bin/hex/oct notation | |
|
1699 | 0[bBxXoO]_?(?:\w|\d)+ | |
|
1700 | ) | |
|
1701 | )? | |
|
1702 | $ | |
|
1703 | """ | |
|
1704 | ) | |
|
1705 | ||
|
1706 | ||
|
1397 | 1707 | def _convert_matcher_v1_result_to_v2( |
|
1398 | 1708 | matches: Sequence[str], |
|
1399 | 1709 | type: str, |
|
1400 | fragment: str = None, | |
|
1710 | fragment: Optional[str] = None, | |
|
1401 | 1711 | suppress_if_matches: bool = False, |
|
1402 | 1712 | ) -> SimpleMatcherResult: |
|
1403 | 1713 | """Utility to help with transition""" |
|
1404 | 1714 | result = { |
|
1405 | 1715 | "completions": [SimpleCompletion(text=match, type=type) for match in matches], |
|
1406 | 1716 | "suppress": (True if matches else False) if suppress_if_matches else False, |
|
1407 | 1717 | } |
|
1408 | 1718 | if fragment is not None: |
|
1409 | 1719 | result["matched_fragment"] = fragment |
|
1410 | return result | |
|
1720 | return cast(SimpleMatcherResult, result) | |
|
1411 | 1721 | |
|
1412 | 1722 | |
|
1413 | 1723 | class IPCompleter(Completer): |
|
1414 | 1724 | """Extension of the completer class with IPython-specific features""" |
|
1415 | 1725 | |
|
1416 | __dict_key_regexps: Optional[Dict[bool,Pattern]] = None | |
|
1417 | ||
|
1418 | 1726 | @observe('greedy') |
|
1419 | 1727 | def _greedy_changed(self, change): |
|
1420 | 1728 | """update the splitter and readline delims when greedy is changed""" |
|
1421 | if change['new']: |

1729 | if change["new"]: | |
|
1730 | self.evaluation = "unsafe" | |
|
1731 | self.auto_close_dict_keys = True | |
|
1422 | 1732 | self.splitter.delims = GREEDY_DELIMS |
|
1423 | 1733 | else: |
|
1734 | self.evaluation = "limited" | |
|
1735 | self.auto_close_dict_keys = False | |
|
1424 | 1736 | self.splitter.delims = DELIMS |
|
1425 | 1737 | |
|
1426 | 1738 | dict_keys_only = Bool( |
|
1427 | 1739 | False, |
|
1428 | 1740 | help=""" |
|
1429 | 1741 | Whether to show dict key matches only. |
|
1430 | 1742 | |
|
1431 | 1743 | (disables all matchers except for `IPCompleter.dict_key_matcher`). |
|
1432 | 1744 | """, |
|
1433 | 1745 | ) |
|
1434 | 1746 | |
|
1435 | 1747 | suppress_competing_matchers = UnionTrait( |
|
1436 | 1748 | [Bool(allow_none=True), DictTrait(Bool(None, allow_none=True))], |
|
1437 | 1749 | default_value=None, |
|
1438 | 1750 | help=""" |
|
1439 | 1751 | Whether to suppress completions from other *Matchers*. |
|
1440 | 1752 | |
|
1441 | 1753 | When set to ``None`` (default) the matchers will attempt to auto-detect |
|
1442 | 1754 | whether suppression of other matchers is desirable. For example, at |
|
1443 | 1755 | the beginning of a line followed by `%` we expect a magic completion |
|
1444 | 1756 | to be the only applicable option, and after ``my_dict['`` we usually |
|
1445 | 1757 | expect a completion with an existing dictionary key. |
|
1446 | 1758 | |
|
1447 | 1759 | If you want to disable this heuristic and see completions from all matchers, |
|
1448 | 1760 | set ``IPCompleter.suppress_competing_matchers = False``. |
|
1449 | 1761 | To disable the heuristic for specific matchers provide a dictionary mapping: |
|
1450 | 1762 | ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher': False}``. |
|
1451 | 1763 | |
|
1452 | 1764 | Set ``IPCompleter.suppress_competing_matchers = True`` to limit |
|
1453 | 1765 | completions to the set of matchers with the highest priority; |
|
1454 | 1766 | this is equivalent to ``IPCompleter.merge_completions`` and |
|
1455 | 1767 | can be beneficial for performance, but will sometimes omit relevant |
|
1456 | 1768 | candidates from matchers further down the priority list. |
|
1457 | 1769 | """, |
|
1458 | 1770 | ).tag(config=True) |
|
1459 | 1771 | |
|
1460 | 1772 | merge_completions = Bool( |
|
1461 | 1773 | True, |
|
1462 | 1774 | help="""Whether to merge completion results into a single list |
|
1463 | 1775 | |
|
1464 | 1776 | If False, only the completion results from the first non-empty |
|
1465 | 1777 | completer will be returned. |
|
1466 | 1778 | |
|
1467 | 1779 | As of version 8.6.0, setting the value to ``False`` is an alias for: |
|
1468 | 1780 | ``IPCompleter.suppress_competing_matchers = True``. |
|
1469 | 1781 | """, |
|
1470 | 1782 | ).tag(config=True) |
|
1471 | 1783 | |
|
1472 | 1784 | disable_matchers = ListTrait( |
|
1473 | 1785 | Unicode(), |
|
1474 | 1786 | help="""List of matchers to disable. |
|
1475 | 1787 | |
|
1476 | 1788 | The list should contain matcher identifiers (see :any:`completion_matcher`). |
|
1477 | 1789 | """, |
|
1478 | 1790 | ).tag(config=True) |
|
1479 | 1791 | |
|
1480 | 1792 | omit__names = Enum( |
|
1481 | 1793 | (0, 1, 2), |
|
1482 | 1794 | default_value=2, |
|
1483 | 1795 | help="""Instruct the completer to omit private method names |
|
1484 | 1796 | |
|
1485 | 1797 | Specifically, when completing on ``object.<tab>``. |
|
1486 | 1798 | |
|
1487 | 1799 | When 2 [default]: all names that start with '_' will be excluded. |
|
1488 | 1800 | |
|
1489 | 1801 | When 1: all 'magic' names (``__foo__``) will be excluded. |
|
1490 | 1802 | |
|
1491 | 1803 | When 0: nothing will be excluded. |
|
1492 | 1804 | """ |
|
1493 | 1805 | ).tag(config=True) |
|
1494 | 1806 | limit_to__all__ = Bool(False, |
|
1495 | 1807 | help=""" |
|
1496 | 1808 | DEPRECATED as of version 5.0. |
|
1497 | 1809 | |
|
1498 | 1810 | Instruct the completer to use __all__ for the completion |
|
1499 | 1811 | |
|
1500 | 1812 | Specifically, when completing on ``object.<tab>``. |
|
1501 | 1813 | |
|
1502 | 1814 | When True: only those names in obj.__all__ will be included. |
|
1503 | 1815 | |
|
1504 | 1816 | When False [default]: the __all__ attribute is ignored |
|
1505 | 1817 | """, |
|
1506 | 1818 | ).tag(config=True) |
|
1507 | 1819 | |
|
1508 | 1820 | profile_completions = Bool( |
|
1509 | 1821 | default_value=False, |
|
1510 | 1822 | help="If True, emit profiling data for completion subsystem using cProfile." |
|
1511 | 1823 | ).tag(config=True) |
|
1512 | 1824 | |
|
1513 | 1825 | profiler_output_dir = Unicode( |
|
1514 | 1826 | default_value=".completion_profiles", |
|
1515 | 1827 | help="Template for path at which to output profile data for completions." |
|
1516 | 1828 | ).tag(config=True) |
|
1517 | 1829 | |
|
1518 | 1830 | @observe('limit_to__all__') |
|
1519 | 1831 | def _limit_to_all_changed(self, change): |
|
1520 | 1832 | warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration ' |
|
1521 | 1833 | 'value has been deprecated since IPython 5.0, will be made to have ' |
|
1522 | 1834 | 'no effects and then removed in future version of IPython.', |
|
1523 | 1835 | UserWarning) |
|
1524 | 1836 | |
|
1525 | 1837 | def __init__( |
|
1526 | 1838 | self, shell=None, namespace=None, global_namespace=None, config=None, **kwargs |
|
1527 | 1839 | ): |
|
1528 | 1840 | """IPCompleter() -> completer |
|
1529 | 1841 | |
|
1530 | 1842 | Return a completer object. |
|
1531 | 1843 | |
|
1532 | 1844 | Parameters |
|
1533 | 1845 | ---------- |
|
1534 | 1846 | shell |
|
1535 | 1847 | a pointer to the ipython shell itself. This is needed |
|
1536 | 1848 | because this completer knows about magic functions, and those can |
|
1537 | 1849 | only be accessed via the ipython instance. |
|
1538 | 1850 | namespace : dict, optional |
|
1539 | 1851 | an optional dict where completions are performed. |
|
1540 | 1852 | global_namespace : dict, optional |
|
1541 | 1853 | secondary optional dict for completions, to |
|
1542 | 1854 | handle cases (such as IPython embedded inside functions) where |
|
1543 | 1855 | both Python scopes are visible. |
|
1544 | 1856 | config : Config |
|
1545 | 1857 | traitlet's config object |
|
1546 | 1858 | **kwargs |
|
1547 | 1859 | passed to super class unmodified. |
|
1548 | 1860 | """ |
|
1549 | 1861 | |
|
1550 | 1862 | self.magic_escape = ESC_MAGIC |
|
1551 | 1863 | self.splitter = CompletionSplitter() |
|
1552 | 1864 | |
|
1553 | 1865 | # _greedy_changed() depends on splitter and readline being defined: |
|
1554 | 1866 | super().__init__( |
|
1555 | 1867 | namespace=namespace, |
|
1556 | 1868 | global_namespace=global_namespace, |
|
1557 | 1869 | config=config, |
|
1558 | 1870 | **kwargs, |
|
1559 | 1871 | ) |
|
1560 | 1872 | |
|
1561 | 1873 | # List where completion matches will be stored |
|
1562 | 1874 | self.matches = [] |
|
1563 | 1875 | self.shell = shell |
|
1564 | 1876 | # Regexp to split filenames with spaces in them |
|
1565 | 1877 | self.space_name_re = re.compile(r'([^\\] )') |
|
1566 | 1878 | # Hold a local ref. to glob.glob for speed |
|
1567 | 1879 | self.glob = glob.glob |
|
1568 | 1880 | |
|
1569 | 1881 | # Determine if we are running on 'dumb' terminals, like (X)Emacs |
|
1570 | 1882 | # buffers, to avoid completion problems. |
|
1571 | 1883 | term = os.environ.get('TERM','xterm') |
|
1572 | 1884 | self.dumb_terminal = term in ['dumb','emacs'] |
|
1573 | 1885 | |
|
1574 | 1886 | # Special handling of backslashes needed in win32 platforms |
|
1575 | 1887 | if sys.platform == "win32": |
|
1576 | 1888 | self.clean_glob = self._clean_glob_win32 |
|
1577 | 1889 | else: |
|
1578 | 1890 | self.clean_glob = self._clean_glob |
|
1579 | 1891 | |
|
1580 | 1892 | #regexp to parse docstring for function signature |
|
1581 | 1893 | self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*') |
|
1582 | 1894 | self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)') |
|
1583 | 1895 | #use this if positional argument name is also needed |
|
1584 | 1896 | #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)') |
|
1585 | 1897 | |
|
1586 | 1898 | self.magic_arg_matchers = [ |
|
1587 | 1899 | self.magic_config_matcher, |
|
1588 | 1900 | self.magic_color_matcher, |
|
1589 | 1901 | ] |
|
1590 | 1902 | |
|
1591 | 1903 | # This is set externally by InteractiveShell |
|
1592 | 1904 | self.custom_completers = None |
|
1593 | 1905 | |
|
1594 | 1906 | # This is a list of names of unicode characters that can be completed |
|
1595 | 1907 | # into their corresponding unicode value. The list is large, so we |
|
1596 | 1908 | # lazily initialize it on first use. Consuming code should access this |
|
1597 | 1909 | # attribute through the `@unicode_names` property. |
|
1598 | 1910 | self._unicode_names = None |
|
1599 | 1911 | |
|
1600 | 1912 | self._backslash_combining_matchers = [ |
|
1601 | 1913 | self.latex_name_matcher, |
|
1602 | 1914 | self.unicode_name_matcher, |
|
1603 | 1915 | back_latex_name_matcher, |
|
1604 | 1916 | back_unicode_name_matcher, |
|
1605 | 1917 | self.fwd_unicode_matcher, |
|
1606 | 1918 | ] |
|
1607 | 1919 | |
|
1608 | 1920 | if not self.backslash_combining_completions: |
|
1609 | 1921 | for matcher in self._backslash_combining_matchers: |
|
1610 | self.disable_matchers.append(matcher) | 

1922 | self.disable_matchers.append(_get_matcher_id(matcher)) | |
|
1611 | 1923 | |
|
1612 | 1924 | if not self.merge_completions: |
|
1613 | 1925 | self.suppress_competing_matchers = True |
|
1614 | 1926 | |
|
1615 | 1927 | @property |
|
1616 | 1928 | def matchers(self) -> List[Matcher]: |
|
1617 | 1929 | """All active matcher routines for completion""" |
|
1618 | 1930 | if self.dict_keys_only: |
|
1619 | 1931 | return [self.dict_key_matcher] |
|
1620 | 1932 | |
|
1621 | 1933 | if self.use_jedi: |
|
1622 | 1934 | return [ |
|
1623 | 1935 | *self.custom_matchers, |
|
1624 | 1936 | *self._backslash_combining_matchers, |
|
1625 | 1937 | *self.magic_arg_matchers, |
|
1626 | 1938 | self.custom_completer_matcher, |
|
1627 | 1939 | self.magic_matcher, |
|
1628 | 1940 | self._jedi_matcher, |
|
1629 | 1941 | self.dict_key_matcher, |
|
1630 | 1942 | self.file_matcher, |
|
1631 | 1943 | ] |
|
1632 | 1944 | else: |
|
1633 | 1945 | return [ |
|
1634 | 1946 | *self.custom_matchers, |
|
1635 | 1947 | *self._backslash_combining_matchers, |
|
1636 | 1948 | *self.magic_arg_matchers, |
|
1637 | 1949 | self.custom_completer_matcher, |
|
1638 | 1950 | self.dict_key_matcher, |
|
1639 | 1951 | # TODO: convert python_matches to v2 API |
|
1640 | 1952 | self.magic_matcher, |
|
1641 | 1953 | self.python_matches, |
|
1642 | 1954 | self.file_matcher, |
|
1643 | 1955 | self.python_func_kw_matcher, |
|
1644 | 1956 | ] |
|
1645 | 1957 | |
|
1646 | 1958 | def all_completions(self, text:str) -> List[str]: |
|
1647 | 1959 | """ |
|
1648 | 1960 | Wrapper around the completion methods for the benefit of emacs. |
|
1649 | 1961 | """ |
|
1650 | 1962 | prefix = text.rpartition('.')[0] |
|
1651 | 1963 | with provisionalcompleter(): |
|
1652 | 1964 | return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text |
|
1653 | 1965 | for c in self.completions(text, len(text))] |
|
1654 | 1966 | |
|
1655 | 1967 | return self.complete(text)[1] |
|
1656 | 1968 | |
|
1657 | 1969 | def _clean_glob(self, text:str): |
|
1658 | 1970 | return self.glob("%s*" % text) |
|
1659 | 1971 | |
|
1660 | 1972 | def _clean_glob_win32(self, text:str): |
|
1661 | 1973 | return [f.replace("\\","/") |
|
1662 | 1974 | for f in self.glob("%s*" % text)] |
|
1663 | 1975 | |
|
1664 | 1976 | @context_matcher() |
|
1665 | 1977 | def file_matcher(self, context: CompletionContext) -> SimpleMatcherResult: |
|
1666 | 1978 | """Same as :any:`file_matches`, but adopted to new Matcher API.""" |
|
1667 | 1979 | matches = self.file_matches(context.token) |
|
1668 | 1980 | # TODO: add a heuristic for suppressing (e.g. if it has OS-specific delimiter, |
|
1669 | 1981 | # starts with `/home/`, `C:\`, etc) |
|
1670 | 1982 | return _convert_matcher_v1_result_to_v2(matches, type="path") |
|
1671 | 1983 | |
|
1672 | 1984 | def file_matches(self, text: str) -> List[str]: |
|
1673 | 1985 | """Match filenames, expanding ~USER type strings. |
|
1674 | 1986 | |
|
1675 | 1987 | Most of the seemingly convoluted logic in this completer is an |
|
1676 | 1988 | attempt to handle filenames with spaces in them. And yet it's not |
|
1677 | 1989 | quite perfect, because Python's readline doesn't expose all of the |
|
1678 | 1990 | GNU readline details needed for this to be done correctly. |
|
1679 | 1991 | |
|
1680 | 1992 | For a filename with a space in it, the printed completions will be |
|
1681 | 1993 | only the parts after what's already been typed (instead of the |
|
1682 | 1994 | full completions, as is normally done). I don't think with the |
|
1683 | 1995 | current (as of Python 2.3) Python readline it's possible to do |
|
1684 | 1996 | better. |
|
1685 | 1997 | |
|
1686 | 1998 | .. deprecated:: 8.6 |
|
1687 | 1999 | You can use :meth:`file_matcher` instead. |
|
1688 | 2000 | """ |
|
1689 | 2001 | |
|
1690 | 2002 | # chars that require escaping with backslash - i.e. chars |
|
1691 | 2003 | # that readline treats incorrectly as delimiters, but we |
|
1692 | 2004 | # don't want to treat as delimiters in filename matching |
|
1693 | 2005 | # when escaped with backslash |
|
1694 | 2006 | if text.startswith('!'): |
|
1695 | 2007 | text = text[1:] |
|
1696 | 2008 | text_prefix = u'!' |
|
1697 | 2009 | else: |
|
1698 | 2010 | text_prefix = u'' |
|
1699 | 2011 | |
|
1700 | 2012 | text_until_cursor = self.text_until_cursor |
|
1701 | 2013 | # track strings with open quotes |
|
1702 | 2014 | open_quotes = has_open_quotes(text_until_cursor) |
|
1703 | 2015 | |
|
1704 | 2016 | if '(' in text_until_cursor or '[' in text_until_cursor: |
|
1705 | 2017 | lsplit = text |
|
1706 | 2018 | else: |
|
1707 | 2019 | try: |
|
1708 | 2020 | # arg_split ~ shlex.split, but with unicode bugs fixed by us |
|
1709 | 2021 | lsplit = arg_split(text_until_cursor)[-1] |
|
1710 | 2022 | except ValueError: |
|
1711 | 2023 | # typically an unmatched ", or backslash without escaped char. |
|
1712 | 2024 | if open_quotes: |
|
1713 | 2025 | lsplit = text_until_cursor.split(open_quotes)[-1] |
|
1714 | 2026 | else: |
|
1715 | 2027 | return [] |
|
1716 | 2028 | except IndexError: |
|
1717 | 2029 | # tab pressed on empty line |
|
1718 | 2030 | lsplit = "" |
|
1719 | 2031 | |
|
1720 | 2032 | if not open_quotes and lsplit != protect_filename(lsplit): |
|
1721 | 2033 | # if protectables are found, do matching on the whole escaped name |
|
1722 | 2034 | has_protectables = True |
|
1723 | 2035 | text0,text = text,lsplit |
|
1724 | 2036 | else: |
|
1725 | 2037 | has_protectables = False |
|
1726 | 2038 | text = os.path.expanduser(text) |
|
1727 | 2039 | |
|
1728 | 2040 | if text == "": |
|
1729 | 2041 | return [text_prefix + protect_filename(f) for f in self.glob("*")] |
|
1730 | 2042 | |
|
1731 | 2043 | # Compute the matches from the filesystem |
|
1732 | 2044 | if sys.platform == 'win32': |
|
1733 | 2045 | m0 = self.clean_glob(text) |
|
1734 | 2046 | else: |
|
1735 | 2047 | m0 = self.clean_glob(text.replace('\\', '')) |
|
1736 | 2048 | |
|
1737 | 2049 | if has_protectables: |
|
1738 | 2050 | # If we had protectables, we need to revert our changes to the |
|
1739 | 2051 | # beginning of filename so that we don't double-write the part |
|
1740 | 2052 | # of the filename we have so far |
|
1741 | 2053 | len_lsplit = len(lsplit) |
|
1742 | 2054 | matches = [text_prefix + text0 + |
|
1743 | 2055 | protect_filename(f[len_lsplit:]) for f in m0] |
|
1744 | 2056 | else: |
|
1745 | 2057 | if open_quotes: |
|
1746 | 2058 | # if we have a string with an open quote, we don't need to |
|
1747 | 2059 | # protect the names beyond the quote (and we _shouldn't_, as |
|
1748 | 2060 | # it would cause bugs when the filesystem call is made). |
|
1749 | 2061 | matches = m0 if sys.platform == "win32" else\ |
|
1750 | 2062 | [protect_filename(f, open_quotes) for f in m0] |
|
1751 | 2063 | else: |
|
1752 | 2064 | matches = [text_prefix + |
|
1753 | 2065 | protect_filename(f) for f in m0] |
|
1754 | 2066 | |
|
1755 | 2067 | # Mark directories in input list by appending '/' to their names. |
|
1756 | 2068 | return [x+'/' if os.path.isdir(x) else x for x in matches] |
|
1757 | 2069 | |
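
A quick, hedged illustration of the escaping behaviour described above: `protect_filename` (used by this matcher) backslash-escapes characters that readline would otherwise treat as delimiters, and directories come back with a trailing `/`. The file name here is made up.

    from IPython.core.completer import protect_filename

    # a space must be escaped so readline keeps the token together
    protect_filename("my docs/notes.txt")   # -> 'my\\ docs/notes.txt'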
|
1758 | 2070 | @context_matcher() |
|
1759 | 2071 | def magic_matcher(self, context: CompletionContext) -> SimpleMatcherResult: |
|
1760 | 2072 | """Match magics.""" |
|
1761 | 2073 | text = context.token |
|
1762 | 2074 | matches = self.magic_matches(text) |
|
1763 | 2075 | result = _convert_matcher_v1_result_to_v2(matches, type="magic") |
|
1764 | 2076 | is_magic_prefix = len(text) > 0 and text[0] == "%" |
|
1765 | 2077 | result["suppress"] = is_magic_prefix and bool(result["completions"]) |
|
1766 | 2078 | return result |
|
1767 | 2079 | |
|
1768 | 2080 | def magic_matches(self, text: str): |
|
1769 | 2081 | """Match magics. |
|
1770 | 2082 | |
|
1771 | 2083 | .. deprecated:: 8.6 |
|
1772 | 2084 | You can use :meth:`magic_matcher` instead. |
|
1773 | 2085 | """ |
|
1774 | 2086 | # Get all shell magics now rather than statically, so magics loaded at |
|
1775 | 2087 | # runtime show up too. |
|
1776 | 2088 | lsm = self.shell.magics_manager.lsmagic() |
|
1777 | 2089 | line_magics = lsm['line'] |
|
1778 | 2090 | cell_magics = lsm['cell'] |
|
1779 | 2091 | pre = self.magic_escape |
|
1780 | 2092 | pre2 = pre+pre |
|
1781 | 2093 | |
|
1782 | 2094 | explicit_magic = text.startswith(pre) |
|
1783 | 2095 | |
|
1784 | 2096 | # Completion logic: |
|
1785 | 2097 | # - user gives %%: only do cell magics |
|
1786 | 2098 | # - user gives %: do both line and cell magics |
|
1787 | 2099 | # - no prefix: do both |
|
1788 | 2100 | # In other words, line magics are skipped if the user gives %% explicitly |
|
1789 | 2101 | # |
|
1790 | 2102 | # We also exclude magics that match any currently visible names: |
|
1791 | 2103 | # https://github.com/ipython/ipython/issues/4877, unless the user has |
|
1792 | 2104 | # typed a %: |
|
1793 | 2105 | # https://github.com/ipython/ipython/issues/10754 |
|
1794 | 2106 | bare_text = text.lstrip(pre) |
|
1795 | 2107 | global_matches = self.global_matches(bare_text) |
|
1796 | 2108 | if not explicit_magic: |
|
1797 | 2109 | def matches(magic): |
|
1798 | 2110 | """ |
|
1799 | 2111 | Filter magics, in particular remove magics that match |
|
1800 | 2112 | a name present in global namespace. |
|
1801 | 2113 | """ |
|
1802 | 2114 | return ( magic.startswith(bare_text) and |
|
1803 | 2115 | magic not in global_matches ) |
|
1804 | 2116 | else: |
|
1805 | 2117 | def matches(magic): |
|
1806 | 2118 | return magic.startswith(bare_text) |
|
1807 | 2119 | |
|
1808 | 2120 | comp = [ pre2+m for m in cell_magics if matches(m)] |
|
1809 | 2121 | if not text.startswith(pre2): |
|
1810 | 2122 | comp += [ pre+m for m in line_magics if matches(m)] |
|
1811 | 2123 | |
|
1812 | 2124 | return comp |
|
1813 | 2125 | |
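
A hedged sketch of the three cases above, run against a live shell (the exact set of magics depends on what is loaded):

    ip = get_ipython()
    ip.Completer.magic_matches('%%ti')  # cell magics only, e.g. ['%%time', '%%timeit']
    ip.Completer.magic_matches('%ti')   # both kinds, e.g. ['%%time', '%%timeit', '%time', '%timeit']
    ip.Completer.magic_matches('ti')    # same, minus any magic shadowed by a name in the user namespace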
|
1814 | 2126 | @context_matcher() |
|
1815 | 2127 | def magic_config_matcher(self, context: CompletionContext) -> SimpleMatcherResult: |
|
1816 | 2128 | """Match class names and attributes for %config magic.""" |
|
1817 | 2129 | # NOTE: uses `line_buffer` equivalent for compatibility |
|
1818 | 2130 | matches = self.magic_config_matches(context.line_with_cursor) |
|
1819 | 2131 | return _convert_matcher_v1_result_to_v2(matches, type="param") |
|
1820 | 2132 | |
|
1821 | 2133 | def magic_config_matches(self, text: str) -> List[str]: |
|
1822 | 2134 | """Match class names and attributes for %config magic. |
|
1823 | 2135 | |
|
1824 | 2136 | .. deprecated:: 8.6 |
|
1825 | 2137 | You can use :meth:`magic_config_matcher` instead. |
|
1826 | 2138 | """ |
|
1827 | 2139 | texts = text.strip().split() |
|
1828 | 2140 | |
|
1829 | 2141 | if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'): |
|
1830 | 2142 | # get all configuration classes |
|
1831 | 2143 | classes = sorted(set([ c for c in self.shell.configurables |
|
1832 | 2144 | if c.__class__.class_traits(config=True) |
|
1833 | 2145 | ]), key=lambda x: x.__class__.__name__) |
|
1834 | 2146 | classnames = [ c.__class__.__name__ for c in classes ] |
|
1835 | 2147 | |
|
1836 | 2148 | # return all classnames if config or %config is given |
|
1837 | 2149 | if len(texts) == 1: |
|
1838 | 2150 | return classnames |
|
1839 | 2151 | |
|
1840 | 2152 | # match classname |
|
1841 | 2153 | classname_texts = texts[1].split('.') |
|
1842 | 2154 | classname = classname_texts[0] |
|
1843 | 2155 | classname_matches = [ c for c in classnames |
|
1844 | 2156 | if c.startswith(classname) ] |
|
1845 | 2157 | |
|
1846 | 2158 | # return matched classes or the matched class with attributes |
|
1847 | 2159 | if texts[1].find('.') < 0: |
|
1848 | 2160 | return classname_matches |
|
1849 | 2161 | elif len(classname_matches) == 1 and \ |
|
1850 | 2162 | classname_matches[0] == classname: |
|
1851 | 2163 | cls = classes[classnames.index(classname)].__class__ |
|
1852 | 2164 | help = cls.class_get_help() |
|
1853 | 2165 | # strip leading '--' from cl-args: |
|
1854 | 2166 | help = re.sub(re.compile(r'^--', re.MULTILINE), '', help) |
|
1855 | 2167 | return [ attr.split('=')[0] |
|
1856 | 2168 | for attr in help.strip().splitlines() |
|
1857 | 2169 | if attr.startswith(texts[1]) ] |
|
1858 | 2170 | return [] |
|
1859 | 2171 | |
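
For example (a hedged sketch; the class and trait names returned depend on the running configurables):

    ip = get_ipython()
    ip.Completer.magic_config_matches('%config IPComp')          # e.g. ['IPCompleter']
    ip.Completer.magic_config_matches('%config IPCompleter.use') # e.g. ['IPCompleter.use_jedi']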
|
1860 | 2172 | @context_matcher() |
|
1861 | 2173 | def magic_color_matcher(self, context: CompletionContext) -> SimpleMatcherResult: |
|
1862 | 2174 | """Match color schemes for %colors magic.""" |
|
1863 | 2175 | # NOTE: uses `line_buffer` equivalent for compatibility |
|
1864 | 2176 | matches = self.magic_color_matches(context.line_with_cursor) |
|
1865 | 2177 | return _convert_matcher_v1_result_to_v2(matches, type="param") |
|
1866 | 2178 | |
|
1867 | 2179 | def magic_color_matches(self, text: str) -> List[str]: |
|
1868 | 2180 | """Match color schemes for %colors magic. |
|
1869 | 2181 | |
|
1870 | 2182 | .. deprecated:: 8.6 |
|
1871 | 2183 | You can use :meth:`magic_color_matcher` instead. |
|
1872 | 2184 | """ |
|
1873 | 2185 | texts = text.split() |
|
1874 | 2186 | if text.endswith(' '): |
|
1875 | 2187 | # .split() strips off the trailing whitespace. Add '' back |
|
1876 | 2188 | # so that: '%colors ' -> ['%colors', ''] |
|
1877 | 2189 | texts.append('') |
|
1878 | 2190 | |
|
1879 | 2191 | if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'): |
|
1880 | 2192 | prefix = texts[1] |
|
1881 | 2193 | return [ color for color in InspectColors.keys() |
|
1882 | 2194 | if color.startswith(prefix) ] |
|
1883 | 2195 | return [] |
|
1884 | 2196 | |
|
1885 | 2197 | @context_matcher(identifier="IPCompleter.jedi_matcher") |
|
1886 | 2198 | def _jedi_matcher(self, context: CompletionContext) -> _JediMatcherResult: |
|
1887 | 2199 | matches = self._jedi_matches( |
|
1888 | 2200 | cursor_column=context.cursor_position, |
|
1889 | 2201 | cursor_line=context.cursor_line, |
|
1890 | 2202 | text=context.full_text, |
|
1891 | 2203 | ) |
|
1892 | 2204 | return { |
|
1893 | 2205 | "completions": matches, |
|
1894 | 2206 | # static analysis should not suppress other matchers |
|
1895 | 2207 | "suppress": False, |
|
1896 | 2208 | } |
|
1897 | 2209 | |
|
1898 | 2210 | def _jedi_matches( |
|
1899 | 2211 | self, cursor_column: int, cursor_line: int, text: str |
|
1900 | ) -> Itera | 

2212 | ) -> Iterator[_JediCompletionLike]: | |
|
1901 | 2213 | """ |
|
1902 | 2214 | Return a list of :any:`jedi.api.Completion`s object from a ``text`` and |
|
1903 | 2215 | cursor position. |
|
1904 | 2216 | |
|
1905 | 2217 | Parameters |
|
1906 | 2218 | ---------- |
|
1907 | 2219 | cursor_column : int |
|
1908 | 2220 | column position of the cursor in ``text``, 0-indexed. |
|
1909 | 2221 | cursor_line : int |
|
1910 | 2222 | line position of the cursor in ``text``, 0-indexed |
|
1911 | 2223 | text : str |
|
1912 | 2224 | text to complete |
|
1913 | 2225 | |
|
1914 | 2226 | Notes |
|
1915 | 2227 | ----- |
|
1916 | 2228 | If ``IPCompleter.debug`` is ``True``, this may return a :any:`_FakeJediCompletion` 
|
1917 | 2229 | object containing a string with the Jedi debug information attached. |
|
1918 | 2230 | |
|
1919 | 2231 | .. deprecated:: 8.6 |
|
1920 | 2232 | You can use :meth:`_jedi_matcher` instead. |
|
1921 | 2233 | """ |
|
1922 | 2234 | namespaces = [self.namespace] |
|
1923 | 2235 | if self.global_namespace is not None: |
|
1924 | 2236 | namespaces.append(self.global_namespace) |
|
1925 | 2237 | |
|
1926 | 2238 | completion_filter = lambda x:x |
|
1927 | 2239 | offset = cursor_to_position(text, cursor_line, cursor_column) |
|
1928 | 2240 | # filter output if we are completing for object members |
|
1929 | 2241 | if offset: |
|
1930 | 2242 | pre = text[offset-1] |
|
1931 | 2243 | if pre == '.': |
|
1932 | 2244 | if self.omit__names == 2: |
|
1933 | 2245 | completion_filter = lambda c:not c.name.startswith('_') |
|
1934 | 2246 | elif self.omit__names == 1: |
|
1935 | 2247 | completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__')) |
|
1936 | 2248 | elif self.omit__names == 0: |
|
1937 | 2249 | completion_filter = lambda x:x |
|
1938 | 2250 | else: |
|
1939 | 2251 | raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names)) |
|
1940 | 2252 | |
|
1941 | 2253 | interpreter = jedi.Interpreter(text[:offset], namespaces) |
|
1942 | 2254 | try_jedi = True |
|
1943 | 2255 | |
|
1944 | 2256 | try: |
|
1945 | 2257 | # find the first token in the current tree -- if it is a ' or " then we are in a string |
|
1946 | 2258 | completing_string = False |
|
1947 | 2259 | try: |
|
1948 | 2260 | first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value')) |
|
1949 | 2261 | except StopIteration: |
|
1950 | 2262 | pass |
|
1951 | 2263 | else: |
|
1952 | 2264 | # note the value may be ', ", or it may also be ''' or """, or |
|
1953 | 2265 | # in some cases, """what/you/typed..., but all of these are |
|
1954 | 2266 | # strings. |
|
1955 | 2267 | completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'} |
|
1956 | 2268 | |
|
1957 | 2269 | # if we are in a string jedi is likely not the right candidate for |
|
1958 | 2270 | # now. Skip it. |
|
1959 | 2271 | try_jedi = not completing_string |
|
1960 | 2272 | except Exception as e: |
|
1961 | 2273 | # many things can go wrong; we are using a private API, just don't crash. 
|
1962 | 2274 | if self.debug: |
|
1963 | 2275 | print("Error detecting if completing a non-finished string :", e, '|') |
|
1964 | 2276 | |
|
1965 | 2277 | if not try_jedi: |
|
1966 | return [] | |
|
2278 | return iter([]) | |
|
1967 | 2279 | try: |
|
1968 | 2280 | return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1)) |
|
1969 | 2281 | except Exception as e: |
|
1970 | 2282 | if self.debug: |
|
1971 | return [_FakeJediCompletion('Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\ns"""' % (e))] | |
|
2283 | return iter( | |
|
2284 | [ | |
|
2285 | _FakeJediCompletion( | |
|
2286 | 'Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\ns"""' | |
|
2287 | % (e) | |
|
2288 | ) | |
|
2289 | ] | |
|
2290 | ) | |
|
1972 | 2291 | else: |
|
1973 | return [] | |
|
2292 | return iter([]) | |
|
1974 | 2293 | |
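
The 0- vs 1-based indexing is the main trap here: IPython's `cursor_line` is 0-based while jedi's `complete()` takes a 1-based line, hence the `cursor_line + 1` in the call above. A hedged, self-contained sketch of the underlying jedi call:

    import jedi

    ns = {"data": {"alpha": 1}}
    interp = jedi.Interpreter("data.ke", [ns])
    # line is 1-based, column is 0-based and points just after "data.ke"
    print([c.name for c in interp.complete(line=1, column=7)])   # e.g. ['keys']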
|
2294 | @completion_matcher(api_version=1) | |
|
1975 | 2295 | def python_matches(self, text: str) -> Iterable[str]: |
|
1976 | 2296 | """Match attributes or global python names""" |
|
1977 | 2297 | if "." in text: |
|
1978 | 2298 | try: |
|
1979 | 2299 | matches = self.attr_matches(text) |
|
1980 | 2300 | if text.endswith('.') and self.omit__names: |
|
1981 | 2301 | if self.omit__names == 1: |
|
1982 | 2302 | # true if txt is _not_ a __ name, false otherwise: |
|
1983 | 2303 | no__name = (lambda txt: |
|
1984 | 2304 | re.match(r'.*\.__.*?__',txt) is None) |
|
1985 | 2305 | else: |
|
1986 | 2306 | # true if txt is _not_ a _ name, false otherwise: |
|
1987 | 2307 | no__name = (lambda txt: |
|
1988 | 2308 | re.match(r'\._.*?',txt[txt.rindex('.'):]) is None) |
|
1989 | 2309 | matches = filter(no__name, matches) |
|
1990 | 2310 | except NameError: |
|
1991 | 2311 | # catches <undefined attributes>.<tab> |
|
1992 | 2312 | matches = [] |
|
1993 | 2313 | else: |
|
1994 | 2314 | matches = self.global_matches(text) |
|
1995 | 2315 | return matches |
|
1996 | 2316 | |
|
1997 | 2317 | def _default_arguments_from_docstring(self, doc): |
|
1998 | 2318 | """Parse the first line of docstring for call signature. |
|
1999 | 2319 | |
|
2000 | 2320 | Docstring should be of the form 'min(iterable[, key=func])\n'. |
|
2001 | 2321 | It can also parse cython docstring of the form |
|
2002 | 2322 | 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'. |
|
2003 | 2323 | """ |
|
2004 | 2324 | if doc is None: |
|
2005 | 2325 | return [] |
|
2006 | 2326 | |
|
2007 | 2327 | # care only about the first line 
|
2008 | 2328 | line = doc.lstrip().splitlines()[0] |
|
2009 | 2329 | |
|
2010 | 2330 | #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*') |
|
2011 | 2331 | #'min(iterable[, key=func])\n' -> 'iterable[, key=func]' |
|
2012 | 2332 | sig = self.docstring_sig_re.search(line) |
|
2013 | 2333 | if sig is None: |
|
2014 | 2334 | return [] |
|
2015 | 2335 | # iterable[, key=func]' -> ['iterable[' ,' key=func]'] |
|
2016 | 2336 | sig = sig.groups()[0].split(',') |
|
2017 | 2337 | ret = [] |
|
2018 | 2338 | for s in sig: |
|
2019 | 2339 | #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)') |
|
2020 | 2340 | ret += self.docstring_kwd_re.findall(s) |
|
2021 | 2341 | return ret |
|
2022 | 2342 | |
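
A hedged sketch of what the two regular expressions extract for the docstring quoted above:

    ip = get_ipython()
    ip.Completer._default_arguments_from_docstring('min(iterable[, key=func])\n')
    # -> ['key']  (only keyword-style names survive the second regex)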
|
2023 | 2343 | def _default_arguments(self, obj): |
|
2024 | 2344 | """Return the list of default arguments of obj if it is callable, |
|
2025 | 2345 | or empty list otherwise.""" |
|
2026 | 2346 | call_obj = obj |
|
2027 | 2347 | ret = [] |
|
2028 | 2348 | if inspect.isbuiltin(obj): |
|
2029 | 2349 | pass |
|
2030 | 2350 | elif not (inspect.isfunction(obj) or inspect.ismethod(obj)): |
|
2031 | 2351 | if inspect.isclass(obj): |
|
2032 | 2352 | #for cython embedsignature=True the constructor docstring |
|
2033 | 2353 | #belongs to the object itself not __init__ |
|
2034 | 2354 | ret += self._default_arguments_from_docstring( |
|
2035 | 2355 | getattr(obj, '__doc__', '')) |
|
2036 | 2356 | # for classes, check for __init__,__new__ |
|
2037 | 2357 | call_obj = (getattr(obj, '__init__', None) or |
|
2038 | 2358 | getattr(obj, '__new__', None)) |
|
2039 | 2359 | # for all others, check if they are __call__able |
|
2040 | 2360 | elif hasattr(obj, '__call__'): |
|
2041 | 2361 | call_obj = obj.__call__ |
|
2042 | 2362 | ret += self._default_arguments_from_docstring( |
|
2043 | 2363 | getattr(call_obj, '__doc__', '')) |
|
2044 | 2364 | |
|
2045 | 2365 | _keeps = (inspect.Parameter.KEYWORD_ONLY, |
|
2046 | 2366 | inspect.Parameter.POSITIONAL_OR_KEYWORD) |
|
2047 | 2367 | |
|
2048 | 2368 | try: |
|
2049 | 2369 | sig = inspect.signature(obj) |
|
2050 | 2370 | ret.extend(k for k, v in sig.parameters.items() if |
|
2051 | 2371 | v.kind in _keeps) |
|
2052 | 2372 | except ValueError: |
|
2053 | 2373 | pass |
|
2054 | 2374 | |
|
2055 | 2375 | return list(set(ret)) |
|
2056 | 2376 | |
|
2057 | 2377 | @context_matcher() |
|
2058 | 2378 | def python_func_kw_matcher(self, context: CompletionContext) -> SimpleMatcherResult: |
|
2059 | 2379 | """Match named parameters (kwargs) of the last open function.""" |
|
2060 | 2380 | matches = self.python_func_kw_matches(context.token) |
|
2061 | 2381 | return _convert_matcher_v1_result_to_v2(matches, type="param") |
|
2062 | 2382 | |
|
2063 | 2383 | def python_func_kw_matches(self, text): |
|
2064 | 2384 | """Match named parameters (kwargs) of the last open function. |
|
2065 | 2385 | |
|
2066 | 2386 | .. deprecated:: 8.6 |
|
2067 | 2387 | You can use :meth:`python_func_kw_matcher` instead. |
|
2068 | 2388 | """ |
|
2069 | 2389 | |
|
2070 | 2390 | if "." in text: # a parameter cannot be dotted |
|
2071 | 2391 | return [] |
|
2072 | 2392 | try: regexp = self.__funcParamsRegex |
|
2073 | 2393 | except AttributeError: |
|
2074 | 2394 | regexp = self.__funcParamsRegex = re.compile(r''' |
|
2075 | 2395 | '.*?(?<!\\)' | # single quoted strings or |
|
2076 | 2396 | ".*?(?<!\\)" | # double quoted strings or |
|
2077 | 2397 | \w+ | # identifier |
|
2078 | 2398 | \S # other characters |
|
2079 | 2399 | ''', re.VERBOSE | re.DOTALL) |
|
2080 | 2400 | # 1. find the nearest identifier that comes before an unclosed |
|
2081 | 2401 | # parenthesis before the cursor |
|
2082 | 2402 | # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo" |
|
2083 | 2403 | tokens = regexp.findall(self.text_until_cursor) |
|
2084 | 2404 | iterTokens = reversed(tokens); openPar = 0 |
|
2085 | 2405 | |
|
2086 | 2406 | for token in iterTokens: |
|
2087 | 2407 | if token == ')': |
|
2088 | 2408 | openPar -= 1 |
|
2089 | 2409 | elif token == '(': |
|
2090 | 2410 | openPar += 1 |
|
2091 | 2411 | if openPar > 0: |
|
2092 | 2412 | # found the last unclosed parenthesis |
|
2093 | 2413 | break |
|
2094 | 2414 | else: |
|
2095 | 2415 | return [] |
|
2096 | 2416 | # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" ) |
|
2097 | 2417 | ids = [] |
|
2098 | 2418 | isId = re.compile(r'\w+$').match |
|
2099 | 2419 | |
|
2100 | 2420 | while True: |
|
2101 | 2421 | try: |
|
2102 | 2422 | ids.append(next(iterTokens)) |
|
2103 | 2423 | if not isId(ids[-1]): |
|
2104 | 2424 | ids.pop(); break |
|
2105 | 2425 | if not next(iterTokens) == '.': |
|
2106 | 2426 | break |
|
2107 | 2427 | except StopIteration: |
|
2108 | 2428 | break |
|
2109 | 2429 | |
|
2110 | 2430 | # Find all named arguments already assigned to, as to avoid suggesting |
|
2111 | 2431 | # them again |
|
2112 | 2432 | usedNamedArgs = set() |
|
2113 | 2433 | par_level = -1 |
|
2114 | 2434 | for token, next_token in zip(tokens, tokens[1:]): |
|
2115 | 2435 | if token == '(': |
|
2116 | 2436 | par_level += 1 |
|
2117 | 2437 | elif token == ')': |
|
2118 | 2438 | par_level -= 1 |
|
2119 | 2439 | |
|
2120 | 2440 | if par_level != 0: |
|
2121 | 2441 | continue |
|
2122 | 2442 | |
|
2123 | 2443 | if next_token != '=': |
|
2124 | 2444 | continue |
|
2125 | 2445 | |
|
2126 | 2446 | usedNamedArgs.add(token) |
|
2127 | 2447 | |
|
2128 | 2448 | argMatches = [] |
|
2129 | 2449 | try: |
|
2130 | 2450 | callableObj = '.'.join(ids[::-1]) |
|
2131 | 2451 | namedArgs = self._default_arguments(eval(callableObj, |
|
2132 | 2452 | self.namespace)) |
|
2133 | 2453 | |
|
2134 | 2454 | # Remove used named arguments from the list, no need to show twice |
|
2135 | 2455 | for namedArg in set(namedArgs) - usedNamedArgs: |
|
2136 | 2456 | if namedArg.startswith(text): |
|
2137 | 2457 | argMatches.append("%s=" %namedArg) |
|
2138 | 2458 | except: |
|
2139 | 2459 | pass |
|
2140 | 2460 | |
|
2141 | 2461 | return argMatches |
|
2142 | 2462 | |
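
Putting the two steps above together, a hedged example inside an IPython session (exact output depends on what else matches the token):

    def plot(x, y, color="b", linewidth=1):
        pass

    # with the cursor at the end of this line buffer ...
    text, matches = get_ipython().Completer.complete(line_buffer="plot(1, li", cursor_pos=10)
    # ... 'linewidth=' is expected to appear among `matches`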
|
2143 | 2463 | @staticmethod |
|
2144 | 2464 | def _get_keys(obj: Any) -> List[Any]: |
|
2145 | 2465 | # Objects can define their own completions by defining an |
|
2146 | 2466 | # _ipython_key_completions_() method. 
|
2147 | 2467 | method = get_real_method(obj, '_ipython_key_completions_') |
|
2148 | 2468 | if method is not None: |
|
2149 | 2469 | return method() |
|
2150 | 2470 | |
|
2151 | 2471 | # Special case some common in-memory dict-like types |
|
2152 | if isinstance(obj, dict) or\ | |
|
2153 | _safe_isinstance(obj, 'pandas', 'DataFrame'): | |
|
2472 | if isinstance(obj, dict) or _safe_isinstance(obj, "pandas", "DataFrame"): | |
|
2154 | 2473 | try: |
|
2155 | 2474 | return list(obj.keys()) |
|
2156 | 2475 | except Exception: |
|
2157 | 2476 | return [] |
|
2477 | elif _safe_isinstance(obj, "pandas", "core", "indexing", "_LocIndexer"): | |
|
2478 | try: | |
|
2479 | return list(obj.obj.keys()) | |
|
2480 | except Exception: | |
|
2481 | return [] | |
|
2158 | 2482 | elif _safe_isinstance(obj, 'numpy', 'ndarray') or\ |
|
2159 | 2483 | _safe_isinstance(obj, 'numpy', 'void'): |
|
2160 | 2484 | return obj.dtype.names or [] |
|
2161 | 2485 | return [] |
|
2162 | 2486 | |
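
Any mapping-like object can opt into this machinery by implementing the hook named above; a hedged sketch:

    class Settings:
        def __init__(self):
            self._data = {"host": "localhost", "port": 8080}

        def __getitem__(self, key):
            return self._data[key]

        def _ipython_key_completions_(self):
            # whatever is returned here is offered after `settings["`
            return list(self._data)

    # settings = Settings(); typing settings["<TAB> then offers "host" and "port"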
|
2163 | 2487 | @context_matcher() |
|
2164 | 2488 | def dict_key_matcher(self, context: CompletionContext) -> SimpleMatcherResult: |
|
2165 | 2489 | """Match string keys in a dictionary, after e.g. ``foo[``.""" |
|
2166 | 2490 | matches = self.dict_key_matches(context.token) |
|
2167 | 2491 | return _convert_matcher_v1_result_to_v2( |
|
2168 | 2492 | matches, type="dict key", suppress_if_matches=True |
|
2169 | 2493 | ) |
|
2170 | 2494 | |
|
2171 | 2495 | def dict_key_matches(self, text: str) -> List[str]: |
|
2172 | 2496 | """Match string keys in a dictionary, after e.g. ``foo[``. |
|
2173 | 2497 | |
|
2174 | 2498 | .. deprecated:: 8.6 |
|
2175 | 2499 | You can use :meth:`dict_key_matcher` instead. |
|
2176 | 2500 | """ |
|
2177 | 2501 | |
|
2178 | if self.__dict_key_regexps is not None: | |
|
2179 | regexps = self.__dict_key_regexps | |
|
2180 | else: | |
|
2181 | dict_key_re_fmt = r'''(?x) | |
|
2182 | ( # match dict-referring expression wrt greedy setting | |
|
2183 | %s | |
|
2184 | ) | |
|
2185 | \[ # open bracket | |
|
2186 | \s* # and optional whitespace | |
|
2187 | # Capture any number of str-like objects (e.g. "a", "b", 'c') | |
|
2188 | ((?:[uUbB]? # string prefix (r not handled) | |
|
2189 | (?: | |
|
2190 | '(?:[^']|(?<!\\)\\')*' | |
|
2191 | | | |
|
2192 | "(?:[^"]|(?<!\\)\\")*" | |
|
2193 | ) | |
|
2194 | \s*,\s* | |
|
2195 | )*) | |
|
2196 | ([uUbB]? # string prefix (r not handled) | |
|
2197 | (?: # unclosed string | |
|
2198 | '(?:[^']|(?<!\\)\\')* | |
|
2199 | | | |
|
2200 | "(?:[^"]|(?<!\\)\\")* | |
|
2201 | ) | |
|
2202 | )? | |
|
2203 | $ | |
|
2204 | ''' | |
|
2205 | regexps = self.__dict_key_regexps = { | |
|
2206 | False: re.compile(dict_key_re_fmt % r''' | |
|
2207 | # identifiers separated by . | |
|
2208 | (?!\d)\w+ | |
|
2209 | (?:\.(?!\d)\w+)* | |
|
2210 | '''), | |
|
2211 | True: re.compile(dict_key_re_fmt % ''' | |
|
2212 | .+ | |
|
2213 | ''') | |
|
2214 | } | |
|
2502 | # Short-circuit on closed dictionary (regular expression would | |
|
2503 | # not match anyway, but would take quite a while). | |
|
2504 | if self.text_until_cursor.strip().endswith("]"): | |
|
2505 | return [] | |
|
2215 | 2506 | |
|
2216 | match = | 

2507 | match = DICT_MATCHER_REGEX.search(self.text_until_cursor) | |
|
2217 | 2508 | |
|
2218 | 2509 | if match is None: |
|
2219 | 2510 | return [] |
|
2220 | 2511 | |
|
2221 | expr, prefix0, prefix = match.groups() | 

2222 | try: | |
|
2223 | obj = eval(expr, self.namespace) | |
|
2224 | except Exception: | |
|
2225 | try: | |
|
2226 | obj = eval(expr, self.global_namespace) | |
|
2227 | except Exception: | |
|
2512 | expr, prior_tuple_keys, key_prefix = match.groups() | |
|
2513 | ||
|
2514 | obj = self._evaluate_expr(expr) | |
|
2515 | ||
|
2516 | if obj is not_found: | |
|
2228 | 2517 | return [] 
|
|
2229 | 2518 | |
|
2230 | 2519 | keys = self._get_keys(obj) |
|
2231 | 2520 | if not keys: |
|
2232 | 2521 | return keys |
|
2233 | 2522 | |
|
2234 | extra_prefix = eval(prefix0) if prefix0 != '' else None | |
|
2523 | tuple_prefix = guarded_eval( | |
|
2524 | prior_tuple_keys, | |
|
2525 | EvaluationContext( | |
|
2526 | globals=self.global_namespace, | |
|
2527 | locals=self.namespace, | |
|
2528 | evaluation=self.evaluation, | |
|
2529 | in_subscript=True, | |
|
2530 | ), | |
|
2531 | ) | |
|
2235 | 2532 | |
|
2236 | closing_quote, token_offset, matches = match_dict_keys( | 

2533 | closing_quote, token_offset, matches = match_dict_keys( | |
|
2534 | keys, key_prefix, self.splitter.delims, extra_prefix=tuple_prefix | |
|
2535 | ) | |
|
2237 | 2536 | if not matches: |
|
2238 | return | 

2537 | return [] | |
|
2239 | 2538 | |
|
2240 | 2539 | # get the cursor position of |
|
2241 | 2540 | # - the text being completed |
|
2242 | 2541 | # - the start of the key text |
|
2243 | 2542 | # - the start of the completion |
|
2244 | 2543 | text_start = len(self.text_until_cursor) - len(text) |
|
2245 | if prefix: | |
|
2544 | if key_prefix: | |
|
2246 | 2545 | key_start = match.start(3) |
|
2247 | 2546 | completion_start = key_start + token_offset |
|
2248 | 2547 | else: |
|
2249 | 2548 | key_start = completion_start = match.end() |
|
2250 | 2549 | |
|
2251 | 2550 | # grab the leading prefix, to make sure all completions start with `text` |
|
2252 | 2551 | if text_start > key_start: |
|
2253 | 2552 | leading = '' |
|
2254 | 2553 | else: |
|
2255 | 2554 | leading = text[text_start:completion_start] |
|
2256 | 2555 | |
|
2257 | # the index of the `[` character | |
|
2258 | bracket_idx = match.end(1) | |
|
2259 | ||
|
2260 | 2556 | # append closing quote and bracket as appropriate |
|
2261 | 2557 | # this is *not* appropriate if the opening quote or bracket is outside |
|
2262 | # the text given to this method | |
|
2263 | suf = '' | |
|
2264 | continuation = self.line_buffer[len(self.text_until_cursor):] | |
|
2265 | if key_start > text_start and closing_quote: | |
|
2266 | # quotes were opened inside text, maybe close them | |
|
2558 | # the text given to this method, e.g. `d["""a\nt | |
|
2559 | can_close_quote = False | |
|
2560 | can_close_bracket = False | |
|
2561 | ||
|
2562 | continuation = self.line_buffer[len(self.text_until_cursor) :].strip() | |
|
2563 | ||
|
2267 | 2564 |
|
|
2565 | # do not close if already closed, e.g. `d['a<tab>'` | |
|
2268 | 2566 |
|
|
2269 | 2567 |
|
|
2270 |
|
|
|
2271 | if bracket_idx > text_start: | |
|
2272 | # brackets were opened inside text, maybe close them | |
|
2273 | if not continuation.startswith(']'): | |
|
2274 | suf += ']' | |
|
2568 | can_close_quote = True | |
|
2569 | ||
|
2570 | continuation = continuation.strip() | |
|
2571 | ||
|
2572 | # e.g. `pandas.DataFrame` has different tuple indexer behaviour, | |
|
2573 | # handling it is out of scope, so let's avoid appending suffixes. | |
|
2574 | has_known_tuple_handling = isinstance(obj, dict) | |
|
2575 | ||
|
2576 | can_close_bracket = ( | |
|
2577 | not continuation.startswith("]") and self.auto_close_dict_keys | |
|
2578 | ) | |
|
2579 | can_close_tuple_item = ( | |
|
2580 | not continuation.startswith(",") | |
|
2581 | and has_known_tuple_handling | |
|
2582 | and self.auto_close_dict_keys | |
|
2583 | ) | |
|
2584 | can_close_quote = can_close_quote and self.auto_close_dict_keys | |
|
2275 | 2585 | |
|
2276 | return [leading + k + suf for k in matches] | |
|
2586 | # fast path if a closing quote should be appended but no suffix is allowed | 
|
2587 | if not can_close_quote and not can_close_bracket and closing_quote: | |
|
2588 | return [leading + k for k in matches] | |
|
2589 | ||
|
2590 | results = [] | |
|
2591 | ||
|
2592 | end_of_tuple_or_item = _DictKeyState.END_OF_TUPLE | _DictKeyState.END_OF_ITEM | |
|
2593 | ||
|
2594 | for k, state_flag in matches.items(): | |
|
2595 | result = leading + k | |
|
2596 | if can_close_quote and closing_quote: | |
|
2597 | result += closing_quote | |
|
2598 | ||
|
2599 | if state_flag == end_of_tuple_or_item: | |
|
2600 | # We do not know which suffix to add, | |
|
2601 | # e.g. both tuple item and string | |
|
2602 | # match this item. | |
|
2603 | pass | |
|
2604 | ||
|
2605 | if state_flag in end_of_tuple_or_item and can_close_bracket: | |
|
2606 | result += "]" | |
|
2607 | if state_flag == _DictKeyState.IN_TUPLE and can_close_tuple_item: | |
|
2608 | result += ", " | |
|
2609 | results.append(result) | |
|
2610 | return results | |
|
2277 | 2611 | |
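
A hedged summary of the resulting behaviour in a session (suffixes are only appended when `IPCompleter.auto_close_dict_keys` is enabled):

    d = {"alpha": 1, ("beta", "gamma"): 2}
    # d["al<TAB>         -> 'alpha', plus the closing quote/bracket if auto-close is on
    # d["beta", "g<TAB>  -> 'gamma', completing the second element of the tuple key
    # pandas objects get key completions too, but no suffixes, since their indexing rules differ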
|
2278 | 2612 | @context_matcher() |
|
2279 | 2613 | def unicode_name_matcher(self, context: CompletionContext): |
|
2280 | 2614 | """Same as :any:`unicode_name_matches`, but adopted to new Matcher API.""" |
|
2281 | 2615 | fragment, matches = self.unicode_name_matches(context.text_until_cursor) |
|
2282 | 2616 | return _convert_matcher_v1_result_to_v2( |
|
2283 | 2617 | matches, type="unicode", fragment=fragment, suppress_if_matches=True |
|
2284 | 2618 | ) |
|
2285 | 2619 | |
|
2286 | 2620 | @staticmethod |
|
2287 | 2621 | def unicode_name_matches(text: str) -> Tuple[str, List[str]]: |
|
2288 | 2622 | """Match Latex-like syntax for unicode characters based 
|
2289 | 2623 | on the name of the character. |
|
2290 | 2624 | |
|
2291 | 2625 | This does ``\\GREEK SMALL LETTER ETA`` -> ``η`` |
|
2292 | 2626 | |
|
2293 | 2627 | Works only on valid Python 3 identifiers, or on combining characters that 
|
2294 | 2628 | will combine to form a valid identifier. |
|
2295 | 2629 | """ |
|
2296 | 2630 | slashpos = text.rfind('\\') |
|
2297 | 2631 | if slashpos > -1: |
|
2298 | 2632 | s = text[slashpos+1:] |
|
2299 | 2633 | try : |
|
2300 | 2634 | unic = unicodedata.lookup(s) |
|
2301 | 2635 | # allow combining chars |
|
2302 | 2636 | if ('a'+unic).isidentifier(): |
|
2303 | 2637 | return '\\'+s,[unic] |
|
2304 | 2638 | except KeyError: |
|
2305 | 2639 | pass |
|
2306 | 2640 | return '', [] |
|
2307 | 2641 | |
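
Because this is a staticmethod, it is easy to probe directly; a hedged example mirroring the docstring:

    from IPython.core.completer import IPCompleter

    fragment, matches = IPCompleter.unicode_name_matches("print('\\GREEK SMALL LETTER ETA")
    # fragment == '\\GREEK SMALL LETTER ETA', matches == ['η']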
|
2308 | 2642 | @context_matcher() |
|
2309 | 2643 | def latex_name_matcher(self, context: CompletionContext): |
|
2310 | 2644 | """Match Latex syntax for unicode characters. |
|
2311 | 2645 | |
|
2312 | 2646 | This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α`` |
|
2313 | 2647 | """ |
|
2314 | 2648 | fragment, matches = self.latex_matches(context.text_until_cursor) |
|
2315 | 2649 | return _convert_matcher_v1_result_to_v2( |
|
2316 | 2650 | matches, type="latex", fragment=fragment, suppress_if_matches=True |
|
2317 | 2651 | ) |
|
2318 | 2652 | |
|
2319 | 2653 | def latex_matches(self, text: str) -> Tuple[str, Sequence[str]]: |
|
2320 | 2654 | """Match Latex syntax for unicode characters. |
|
2321 | 2655 | |
|
2322 | 2656 | This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α`` |
|
2323 | 2657 | |
|
2324 | 2658 | .. deprecated:: 8.6 |
|
2325 | 2659 | You can use :meth:`latex_name_matcher` instead. |
|
2326 | 2660 | """ |
|
2327 | 2661 | slashpos = text.rfind('\\') |
|
2328 | 2662 | if slashpos > -1: |
|
2329 | 2663 | s = text[slashpos:] |
|
2330 | 2664 | if s in latex_symbols: |
|
2331 | 2665 | # Try to complete a full latex symbol to unicode |
|
2332 | 2666 | # \\alpha -> α |
|
2333 | 2667 | return s, [latex_symbols[s]] |
|
2334 | 2668 | else: |
|
2335 | 2669 | # If a user has partially typed a latex symbol, give them |
|
2336 | 2670 | # a full list of options \al -> [\aleph, \alpha] |
|
2337 | 2671 | matches = [k for k in latex_symbols if k.startswith(s)] |
|
2338 | 2672 | if matches: |
|
2339 | 2673 | return s, matches |
|
2340 | 2674 | return '', () |
|
2341 | 2675 | |
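
A hedged example of both directions described in the docstring (the lookup table is the bundled `latex_symbols` mapping):

    ip = get_ipython()
    ip.Completer.latex_matches('\\alpha')  # -> ('\\alpha', ['α'])
    ip.Completer.latex_matches('\\al')     # -> ('\\al', ['\\aleph', '\\alpha', ...]) roughly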
|
2342 | 2676 | @context_matcher() |
|
2343 | 2677 | def custom_completer_matcher(self, context): |
|
2344 | 2678 | """Dispatch custom completer. |
|
2345 | 2679 | |
|
2346 | 2680 | If a match is found, suppresses all other matchers except for Jedi. |
|
2347 | 2681 | """ |
|
2348 | 2682 | matches = self.dispatch_custom_completer(context.token) or [] |
|
2349 | 2683 | result = _convert_matcher_v1_result_to_v2( |
|
2350 | 2684 | matches, type=_UNKNOWN_TYPE, suppress_if_matches=True |
|
2351 | 2685 | ) |
|
2352 | 2686 | result["ordered"] = True |
|
2353 | 2687 | result["do_not_suppress"] = {_get_matcher_id(self._jedi_matcher)} |
|
2354 | 2688 | return result |
|
2355 | 2689 | |
|
2356 | 2690 | def dispatch_custom_completer(self, text): |
|
2357 | 2691 | """ |
|
2358 | 2692 | .. deprecated:: 8.6 |
|
2359 | 2693 | You can use :meth:`custom_completer_matcher` instead. |
|
2360 | 2694 | """ |
|
2361 | 2695 | if not self.custom_completers: |
|
2362 | 2696 | return |
|
2363 | 2697 | |
|
2364 | 2698 | line = self.line_buffer |
|
2365 | 2699 | if not line.strip(): |
|
2366 | 2700 | return None |
|
2367 | 2701 | |
|
2368 | 2702 | # Create a little structure to pass all the relevant information about |
|
2369 | 2703 | # the current completion to any custom completer. |
|
2370 | 2704 | event = SimpleNamespace() |
|
2371 | 2705 | event.line = line |
|
2372 | 2706 | event.symbol = text |
|
2373 | 2707 | cmd = line.split(None,1)[0] |
|
2374 | 2708 | event.command = cmd |
|
2375 | 2709 | event.text_until_cursor = self.text_until_cursor |
|
2376 | 2710 | |
|
2377 | 2711 | # for foo etc, try also to find completer for %foo |
|
2378 | 2712 | if not cmd.startswith(self.magic_escape): |
|
2379 | 2713 | try_magic = self.custom_completers.s_matches( |
|
2380 | 2714 | self.magic_escape + cmd) |
|
2381 | 2715 | else: |
|
2382 | 2716 | try_magic = [] |
|
2383 | 2717 | |
|
2384 | 2718 | for c in itertools.chain(self.custom_completers.s_matches(cmd), |
|
2385 | 2719 | try_magic, |
|
2386 | 2720 | self.custom_completers.flat_matches(self.text_until_cursor)): |
|
2387 | 2721 | try: |
|
2388 | 2722 | res = c(event) |
|
2389 | 2723 | if res: |
|
2390 | 2724 | # first, try case sensitive match |
|
2391 | 2725 | withcase = [r for r in res if r.startswith(text)] |
|
2392 | 2726 | if withcase: |
|
2393 | 2727 | return withcase |
|
2394 | 2728 | # if none, then case insensitive ones are ok too |
|
2395 | 2729 | text_low = text.lower() |
|
2396 | 2730 | return [r for r in res if r.lower().startswith(text_low)] |
|
2397 | 2731 | except TryNext: |
|
2398 | 2732 | pass |
|
2399 | 2733 | except KeyboardInterrupt: |
|
2400 | 2734 | """ |
|
2401 | 2735 | If a custom completer takes too long, 
|
2402 | 2736 | let keyboard interrupt abort and return nothing. |
|
2403 | 2737 | """ |
|
2404 | 2738 | break |
|
2405 | 2739 | |
|
2406 | 2740 | return None |
|
2407 | 2741 | |
|
2408 | 2742 | def completions(self, text: str, offset: int)->Iterator[Completion]: |
|
2409 | 2743 | """ |
|
2410 | 2744 | Returns an iterator over the possible completions |
|
2411 | 2745 | |
|
2412 | 2746 | .. warning:: |
|
2413 | 2747 | |
|
2414 | 2748 | Unstable |
|
2415 | 2749 | |
|
2416 | 2750 | This function is unstable, API may change without warning. |
|
2417 | 2751 | It will also raise unless used in the proper context manager. 
|
2418 | 2752 | |
|
2419 | 2753 | Parameters |
|
2420 | 2754 | ---------- |
|
2421 | 2755 | text : str |
|
2422 | 2756 | Full text of the current input, multi line string. |
|
2423 | 2757 | offset : int |
|
2424 | 2758 | Integer representing the position of the cursor in ``text``. Offset |
|
2425 | 2759 | is 0-based indexed. |
|
2426 | 2760 | |
|
2427 | 2761 | Yields |
|
2428 | 2762 | ------ |
|
2429 | 2763 | Completion |
|
2430 | 2764 | |
|
2431 | 2765 | Notes |
|
2432 | 2766 | ----- |
|
2433 | 2767 | The cursor on a text can either be seen as being "in between" |
|
2434 | 2768 | characters or "On" a character depending on the interface visible to |
|
2435 | 2769 | the user. For consistency the cursor being on "in between" characters X |
|
2436 | 2770 | and Y is equivalent to the cursor being "on" character Y, that is to say |
|
2437 | 2771 | the character the cursor is on is considered as being after the cursor. |
|
2438 | 2772 | |
|
2439 | 2773 | Combining characters may span more than one position in the 
|
2440 | 2774 | text. |
|
2441 | 2775 | |
|
2442 | 2776 | .. note:: |
|
2443 | 2777 | |
|
2444 | 2778 | If ``IPCompleter.debug`` is :any:`True` will yield a ``--jedi/ipython--`` |
|
2445 | 2779 | fake Completion token to distinguish completion returned by Jedi |
|
2446 | 2780 | and usual IPython completion. |
|
2447 | 2781 | |
|
2448 | 2782 | .. note:: |
|
2449 | 2783 | |
|
2450 | 2784 | Completions are not completely deduplicated yet. If identical |
|
2451 | 2785 | completions are coming from different sources this function does not |
|
2452 | 2786 | ensure that each completion object will only be present once. |
|
2453 | 2787 | """ |
|
2454 | 2788 | warnings.warn("_complete is a provisional API (as of IPython 6.0). " |
|
2455 | 2789 | "It may change without warnings. " |
|
2456 | 2790 | "Use in corresponding context manager.", |
|
2457 | 2791 | category=ProvisionalCompleterWarning, stacklevel=2) |
|
2458 | 2792 | |
|
2459 | 2793 | seen = set() |
|
2460 | 2794 | profiler:Optional[cProfile.Profile] |
|
2461 | 2795 | try: |
|
2462 | 2796 | if self.profile_completions: |
|
2463 | 2797 | import cProfile |
|
2464 | 2798 | profiler = cProfile.Profile() |
|
2465 | 2799 | profiler.enable() |
|
2466 | 2800 | else: |
|
2467 | 2801 | profiler = None |
|
2468 | 2802 | |
|
2469 | 2803 | for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000): |
|
2470 | 2804 | if c and (c in seen): |
|
2471 | 2805 | continue |
|
2472 | 2806 | yield c |
|
2473 | 2807 | seen.add(c) |
|
2474 | 2808 | except KeyboardInterrupt: |
|
2475 | 2809 | """if completions take too long and users send keyboard interrupt, |
|
2476 | 2810 | do not crash and return ASAP. """ |
|
2477 | 2811 | pass |
|
2478 | 2812 | finally: |
|
2479 | 2813 | if profiler is not None: |
|
2480 | 2814 | profiler.disable() |
|
2481 | 2815 | ensure_dir_exists(self.profiler_output_dir) |
|
2482 | 2816 | output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4())) |
|
2483 | 2817 | print("Writing profiler output to", output_path) |
|
2484 | 2818 | profiler.dump_stats(output_path) |
|
2485 | 2819 | |
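
A hedged sketch of driving this provisional API from user code; note the context manager, without which the call warns/raises as described above:

    from IPython.core.completer import provisionalcompleter

    ip = get_ipython()
    code = "import collections; collections.name"
    with provisionalcompleter():
        comps = list(ip.Completer.completions(code, len(code)))
    # each item is a Completion carrying .start, .end, .text, .type and .signature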
|
2486 | 2820 | def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]: |
|
2487 | 2821 | """ |
|
2488 | 2822 | Core completion module. Same signature as :any:`completions`, with the 
|
2489 | 2823 | extra `timeout` parameter (in seconds). |
|
2490 | 2824 | |
|
2491 | 2825 | Computing jedi's completion ``.type`` can be quite expensive (it is a |
|
2492 | 2826 | lazy property) and can require some warm-up, more warm up than just |
|
2493 | 2827 | computing the ``name`` of a completion. The warm-up can be : |
|
2494 | 2828 | |
|
2495 | 2829 | - Long warm-up the first time a module is encountered after |
|
2496 | 2830 | install/update: actually build parse/inference tree. |
|
2497 | 2831 | |
|
2498 | 2832 | - first time the module is encountered in a session: load tree from |
|
2499 | 2833 | disk. |
|
2500 | 2834 | |
|
2501 | 2835 | We don't want to block completions for tens of seconds so we give the |
|
2502 | 2836 | completer a "budget" of ``_timeout`` seconds per invocation to compute |
|
2503 | 2837 | completion types, the completions that have not yet been computed will 
|
2504 | 2838 | be marked as "unknown" and will have a chance to be computed next round 

2505 | 2839 | as things get cached. 
|
2506 | 2840 | |
|
2507 | 2841 | Keep in mind that Jedi is not the only thing treating the completion, so 

2508 | 2842 | keep the timeout short-ish: if we take more than 0.3 seconds we still 

2509 | 2843 | have lots of processing to do. 
|
2510 | 2844 | |
|
2511 | 2845 | """ |
|
2512 | 2846 | deadline = time.monotonic() + _timeout |
|
2513 | 2847 | |
|
2514 | 2848 | before = full_text[:offset] |
|
2515 | 2849 | cursor_line, cursor_column = position_to_cursor(full_text, offset) |
|
2516 | 2850 | |
|
2517 | 2851 | jedi_matcher_id = _get_matcher_id(self._jedi_matcher) |
|
2518 | 2852 | |
|
2853 | def is_non_jedi_result( | |
|
2854 | result: MatcherResult, identifier: str | |
|
2855 | ) -> TypeGuard[SimpleMatcherResult]: | |
|
2856 | return identifier != jedi_matcher_id | |
|
2857 | ||
|
2519 | 2858 | results = self._complete( |
|
2520 | 2859 | full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column |
|
2521 | 2860 | ) |
|
2861 | ||
|
2522 | 2862 | non_jedi_results: Dict[str, SimpleMatcherResult] = { |
|
2523 | 2863 | identifier: result |
|
2524 | 2864 | for identifier, result in results.items() |
|
2525 | if identifier != jedi_matcher_id | |
|
2865 | if is_non_jedi_result(result, identifier) | |
|
2526 | 2866 | } |
|
2527 | 2867 | |
|
2528 | 2868 | jedi_matches = ( |
|
2529 | cast(results[jedi_matcher_id] | 

2869 | cast(_JediMatcherResult, results[jedi_matcher_id])["completions"] | |
|
2530 | 2870 | if jedi_matcher_id in results |
|
2531 | 2871 | else () |
|
2532 | 2872 | ) |
|
2533 | 2873 | |
|
2534 | 2874 | iter_jm = iter(jedi_matches) |
|
2535 | 2875 | if _timeout: |
|
2536 | 2876 | for jm in iter_jm: |
|
2537 | 2877 | try: |
|
2538 | 2878 | type_ = jm.type |
|
2539 | 2879 | except Exception: |
|
2540 | 2880 | if self.debug: |
|
2541 | 2881 | print("Error in Jedi getting type of ", jm) |
|
2542 | 2882 | type_ = None |
|
2543 | 2883 | delta = len(jm.name_with_symbols) - len(jm.complete) |
|
2544 | 2884 | if type_ == 'function': |
|
2545 | 2885 | signature = _make_signature(jm) |
|
2546 | 2886 | else: |
|
2547 | 2887 | signature = '' |
|
2548 | 2888 | yield Completion(start=offset - delta, |
|
2549 | 2889 | end=offset, |
|
2550 | 2890 | text=jm.name_with_symbols, |
|
2551 | 2891 | type=type_, |
|
2552 | 2892 | signature=signature, |
|
2553 | 2893 | _origin='jedi') |
|
2554 | 2894 | |
|
2555 | 2895 | if time.monotonic() > deadline: |
|
2556 | 2896 | break |
|
2557 | 2897 | |
|
2558 | 2898 | for jm in iter_jm: |
|
2559 | 2899 | delta = len(jm.name_with_symbols) - len(jm.complete) |
|
2560 | 2900 | yield Completion( |
|
2561 | 2901 | start=offset - delta, |
|
2562 | 2902 | end=offset, |
|
2563 | 2903 | text=jm.name_with_symbols, |
|
2564 | 2904 | type=_UNKNOWN_TYPE, # don't compute type for speed |
|
2565 | 2905 | _origin="jedi", |
|
2566 | 2906 | signature="", |
|
2567 | 2907 | ) |
|
2568 | 2908 | |
|
2569 | 2909 | # TODO: |
|
2570 | 2910 | # Suppress this, right now just for debug. |
|
2571 | 2911 | if jedi_matches and non_jedi_results and self.debug: |
|
2572 | 2912 | some_start_offset = before.rfind( |
|
2573 | 2913 | next(iter(non_jedi_results.values()))["matched_fragment"] |
|
2574 | 2914 | ) |
|
2575 | 2915 | yield Completion( |
|
2576 | 2916 | start=some_start_offset, |
|
2577 | 2917 | end=offset, |
|
2578 | 2918 | text="--jedi/ipython--", |
|
2579 | 2919 | _origin="debug", |
|
2580 | 2920 | type="none", |
|
2581 | 2921 | signature="", |
|
2582 | 2922 | ) |
|
2583 | 2923 | |
|
2584 | ordered = [] | |
|
2585 | sortable = [] | |
|
2924 | ordered: List[Completion] = [] | |
|
2925 | sortable: List[Completion] = [] | |
|
2586 | 2926 | |
|
2587 | 2927 | for origin, result in non_jedi_results.items(): |
|
2588 | 2928 | matched_text = result["matched_fragment"] |
|
2589 | 2929 | start_offset = before.rfind(matched_text) |
|
2590 | 2930 | is_ordered = result.get("ordered", False) |
|
2591 | 2931 | container = ordered if is_ordered else sortable |
|
2592 | 2932 | |
|
2593 | 2933 | # I'm unsure if this is always true, so let's assert and see if it |
|
2594 | 2934 | # crash |
|
2595 | 2935 | assert before.endswith(matched_text) |
|
2596 | 2936 | |
|
2597 | 2937 | for simple_completion in result["completions"]: |
|
2598 | 2938 | completion = Completion( |
|
2599 | 2939 | start=start_offset, |
|
2600 | 2940 | end=offset, |
|
2601 | 2941 | text=simple_completion.text, |
|
2602 | 2942 | _origin=origin, |
|
2603 | 2943 | signature="", |
|
2604 | 2944 | type=simple_completion.type or _UNKNOWN_TYPE, |
|
2605 | 2945 | ) |
|
2606 | 2946 | container.append(completion) |
|
2607 | 2947 | |
|
2608 | 2948 | yield from list(self._deduplicate(ordered + self._sort(sortable)))[ |
|
2609 | 2949 | :MATCHES_LIMIT |
|
2610 | 2950 | ] |
|
2611 | 2951 | |
|
2612 | 2952 | def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]: |
|
2613 | 2953 | """Find completions for the given text and line context. |
|
2614 | 2954 | |
|
2615 | 2955 | Note that both the text and the line_buffer are optional, but at least |
|
2616 | 2956 | one of them must be given. |
|
2617 | 2957 | |
|
2618 | 2958 | Parameters |
|
2619 | 2959 | ---------- |
|
2620 | 2960 | text : string, optional |
|
2621 | 2961 | Text to perform the completion on. If not given, the line buffer |
|
2622 | 2962 | is split using the instance's CompletionSplitter object. |
|
2623 | 2963 | line_buffer : string, optional |
|
2624 | 2964 | If not given, the completer attempts to obtain the current line |
|
2625 | 2965 | buffer via readline. This keyword allows clients which are |
|
2626 | 2966 | requesting for text completions in non-readline contexts to inform |
|
2627 | 2967 | the completer of the entire text. |
|
2628 | 2968 | cursor_pos : int, optional |
|
2629 | 2969 | Index of the cursor in the full line buffer. Should be provided by |
|
2630 | 2970 | remote frontends where kernel has no access to frontend state. |
|
2631 | 2971 | |
|
2632 | 2972 | Returns |
|
2633 | 2973 | ------- |
|
2634 | 2974 | Tuple of two items: |
|
2635 | 2975 | text : str |
|
2636 | 2976 | Text that was actually used in the completion. |
|
2637 | 2977 | matches : list |
|
2638 | 2978 | A list of completion matches. |
|
2639 | 2979 | |
|
2640 | 2980 | Notes |
|
2641 | 2981 | ----- |
|
2642 | 2982 | This API is likely to be deprecated and replaced by |
|
2643 | 2983 | :any:`IPCompleter.completions` in the future. |
|
2644 | 2984 | |
|
2645 | 2985 | """ |
|
2646 | 2986 | warnings.warn('`Completer.complete` is pending deprecation since ' |
|
2647 | 2987 | 'IPython 6.0 and will be replaced by `Completer.completions`.', |
|
2648 | 2988 | PendingDeprecationWarning) |
|
2649 | 2989 | # potential todo, FOLD the 3rd throw away argument of _complete |
|
2650 | 2990 | # into the first 2 one. |
|
2651 | 2991 | # TODO: Q: does the above refer to jedi completions (i.e. 0-indexed?) |
|
2652 | 2992 | # TODO: should we deprecate now, or does it stay? |
|
2653 | 2993 | |
|
2654 | 2994 | results = self._complete( |
|
2655 | 2995 | line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0 |
|
2656 | 2996 | ) |
|
2657 | 2997 | |
|
2658 | 2998 | jedi_matcher_id = _get_matcher_id(self._jedi_matcher) |
|
2659 | 2999 | |
|
2660 | 3000 | return self._arrange_and_extract( |
|
2661 | 3001 | results, |
|
2662 | 3002 | # TODO: can we confirm that excluding Jedi here was a deliberate choice in previous version? |
|
2663 | 3003 | skip_matchers={jedi_matcher_id}, |
|
2664 | 3004 | # this API does not support different start/end positions (fragments of token). |
|
2665 | 3005 | abort_if_offset_changes=True, |
|
2666 | 3006 | ) |
|
2667 | 3007 | |
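
For comparison, a hedged sketch of this legacy entry point (assuming `collections` has already been imported in the session):

    ip = get_ipython()
    text, matches = ip.Completer.complete(text="collections.named")
    # text == 'collections.named' and matches is a flat list of strings,
    # expected to include 'collections.namedtuple'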
|
2668 | 3008 | def _arrange_and_extract( |
|
2669 | 3009 | self, |
|
2670 | 3010 | results: Dict[str, MatcherResult], |
|
2671 | 3011 | skip_matchers: Set[str], |
|
2672 | 3012 | abort_if_offset_changes: bool, |
|
2673 | 3013 | ): |
|
2674 | 3014 | |
|
2675 | sortable = [] | |
|
2676 | ordered = [] | |
|
3015 | sortable: List[AnyMatcherCompletion] = [] | |
|
3016 | ordered: List[AnyMatcherCompletion] = [] | |
|
2677 | 3017 | most_recent_fragment = None |
|
2678 | 3018 | for identifier, result in results.items(): |
|
2679 | 3019 | if identifier in skip_matchers: |
|
2680 | 3020 | continue |
|
2681 | 3021 | if not result["completions"]: |
|
2682 | 3022 | continue |
|
2683 | 3023 | if not most_recent_fragment: |
|
2684 | 3024 | most_recent_fragment = result["matched_fragment"] |
|
2685 | 3025 | if ( |
|
2686 | 3026 | abort_if_offset_changes |
|
2687 | 3027 | and result["matched_fragment"] != most_recent_fragment |
|
2688 | 3028 | ): |
|
2689 | 3029 | break |
|
2690 | 3030 | if result.get("ordered", False): |
|
2691 | 3031 | ordered.extend(result["completions"]) |
|
2692 | 3032 | else: |
|
2693 | 3033 | sortable.extend(result["completions"]) |
|
2694 | 3034 | |
|
2695 | 3035 | if not most_recent_fragment: |
|
2696 | 3036 | most_recent_fragment = "" # to satisfy typechecker (and just in case) |
|
2697 | 3037 | |
|
2698 | 3038 | return most_recent_fragment, [ |
|
2699 | 3039 | m.text for m in self._deduplicate(ordered + self._sort(sortable)) |
|
2700 | 3040 | ] |
|
2701 | 3041 | |
|
2702 | 3042 | def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None, |
|
2703 | 3043 | full_text=None) -> _CompleteResult: |
|
2704 | 3044 | """ |
|
2705 | 3045 | Like complete but can also returns raw jedi completions as well as the |
|
2706 | 3046 | origin of the completion text. This could (and should) be made much |
|
2707 | 3047 | cleaner but that will be simpler once we drop the old (and stateful) |
|
2708 | 3048 | :any:`complete` API. |
|
2709 | 3049 | |
|
2710 | 3050 | With current provisional API, cursor_pos act both (depending on the |
|
2711 | 3051 | caller) as the offset in the ``text`` or ``line_buffer``, or as the |
|
2712 | 3052 | ``column`` when passing multiline strings this could/should be renamed |
|
2713 | 3053 | but would add extra noise. |
|
2714 | 3054 | |
|
2715 | 3055 | Parameters |
|
2716 | 3056 | ---------- |
|
2717 | 3057 | cursor_line |
|
2718 | 3058 | Index of the line the cursor is on. 0 indexed. |
|
2719 | 3059 | cursor_pos |
|
2720 | 3060 | Position of the cursor in the current line/line_buffer/text. 0 |
|
2721 | 3061 | indexed. |
|
2722 | 3062 | line_buffer : optional, str |
|
2723 | 3063 | The current line the cursor is in, this is mostly due to legacy |
|
2724 | 3064 | reason that readline could only give us the single current line. 
|
2725 | 3065 | Prefer `full_text`. |
|
2726 | 3066 | text : str |
|
2727 | 3067 | The current "token" the cursor is in, mostly also for historical |
|
2728 | 3068 | reasons, as the completer would trigger only after the current line 
|
2729 | 3069 | was parsed. |
|
2730 | 3070 | full_text : str |
|
2731 | 3071 | Full text of the current cell. |
|
2732 | 3072 | |
|
2733 | 3073 | Returns |
|
2734 | 3074 | ------- |
|
2735 | 3075 | An ordered dictionary where keys are identifiers of completion |
|
2736 | 3076 | matchers and values are ``MatcherResult``s. |
|
2737 | 3077 | """ |
|
2738 | 3078 | |
|
2739 | 3079 | # if the cursor position isn't given, the only sane assumption we can |
|
2740 | 3080 | # make is that it's at the end of the line (the common case) |
|
2741 | 3081 | if cursor_pos is None: |
|
2742 | 3082 | cursor_pos = len(line_buffer) if text is None else len(text) |
|
2743 | 3083 | |
|
2744 | 3084 | if self.use_main_ns: |
|
2745 | 3085 | self.namespace = __main__.__dict__ |
|
2746 | 3086 | |
|
2747 | 3087 | # if text is either None or an empty string, rely on the line buffer |
|
2748 | 3088 | if (not line_buffer) and full_text: |
|
2749 | 3089 | line_buffer = full_text.split('\n')[cursor_line] |
|
2750 | 3090 | if not text: # issue #11508: check line_buffer before calling split_line |
|
2751 | 3091 | text = ( |
|
2752 | 3092 | self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else "" |
|
2753 | 3093 | ) |
|
2754 | 3094 | |
|
2755 | 3095 | # If no line buffer is given, assume the input text is all there was |
|
2756 | 3096 | if line_buffer is None: |
|
2757 | 3097 | line_buffer = text |
|
2758 | 3098 | |
|
2759 | 3099 | # deprecated - do not use `line_buffer` in new code. |
|
2760 | 3100 | self.line_buffer = line_buffer |
|
2761 | 3101 | self.text_until_cursor = self.line_buffer[:cursor_pos] |
|
2762 | 3102 | |
|
2763 | 3103 | if not full_text: |
|
2764 | 3104 | full_text = line_buffer |
|
2765 | 3105 | |
|
2766 | 3106 | context = CompletionContext( |
|
2767 | 3107 | full_text=full_text, |
|
2768 | 3108 | cursor_position=cursor_pos, |
|
2769 | 3109 | cursor_line=cursor_line, |
|
2770 | 3110 | token=text, |
|
2771 | 3111 | limit=MATCHES_LIMIT, |
|
2772 | 3112 | ) |
|
2773 | 3113 | |
|
2774 | 3114 | # Start with a clean slate of completions |
|
2775 | results = {} | |
|
3115 | results: Dict[str, MatcherResult] = {} | |
|
2776 | 3116 | |
|
2777 | 3117 | jedi_matcher_id = _get_matcher_id(self._jedi_matcher) |
|
2778 | 3118 | |
|
2779 | suppressed_matchers = set() | |
|
3119 | suppressed_matchers: Set[str] = set() | |
|
2780 | 3120 | |
|
2781 | 3121 | matchers = { |
|
2782 | 3122 | _get_matcher_id(matcher): matcher |
|
2783 | 3123 | for matcher in sorted( |
|
2784 | 3124 | self.matchers, key=_get_matcher_priority, reverse=True |
|
2785 | 3125 | ) |
|
2786 | 3126 | } |
|
2787 | 3127 | |
|
2788 | 3128 | for matcher_id, matcher in matchers.items(): |
|
2789 | api_version = _get_matcher_api_version(matcher) | |
|
2790 | 3129 | matcher_id = _get_matcher_id(matcher) |
|
2791 | 3130 | |
|
2792 | 3131 | if matcher_id in self.disable_matchers: |
|
2793 | 3132 | continue |
|
2794 | 3133 | |
|
2795 | 3134 | if matcher_id in results: |
|
2796 | 3135 | warnings.warn(f"Duplicate matcher ID: {matcher_id}.") |
|
2797 | 3136 | |
|
2798 | 3137 | if matcher_id in suppressed_matchers: |
|
2799 | 3138 | continue |
|
2800 | 3139 | |
|
3140 | result: MatcherResult | |
|
2801 | 3141 | try: |
|
2802 |                 if api_version == 1: |

3142 | if _is_matcher_v1(matcher): | |
|
2803 | 3143 | result = _convert_matcher_v1_result_to_v2( |
|
2804 | 3144 | matcher(text), type=_UNKNOWN_TYPE |
|
2805 | 3145 | ) |
|
2806 |                 elif api_version == 2: |

2807 |                     result = matcher(context) |

3146 | elif _is_matcher_v2(matcher): | |
|
3147 | result = matcher(context) | |
|
2808 | 3148 | else: |
|
3149 | api_version = _get_matcher_api_version(matcher) | |
|
2809 | 3150 | raise ValueError(f"Unsupported API version {api_version}") |
|
2810 | 3151 | except: |
|
2811 | 3152 | # Show the ugly traceback if the matcher causes an |
|
2812 | 3153 | # exception, but do NOT crash the kernel! |
|
2813 | 3154 | sys.excepthook(*sys.exc_info()) |
|
2814 | 3155 | continue |
|
2815 | 3156 | |
|
2816 | 3157 | # set default value for matched fragment if suffix was not selected. |
|
2817 | 3158 | result["matched_fragment"] = result.get("matched_fragment", context.token) |
|
2818 | 3159 | |
|
2819 | 3160 | if not suppressed_matchers: |
|
2820 |                 suppression_recommended = result.get("suppress", False) |

3161 | suppression_recommended: Union[bool, Set[str]] = result.get( | |
|
3162 | "suppress", False | |
|
3163 | ) | |
|
2821 | 3164 | |
|
2822 | 3165 | suppression_config = ( |
|
2823 | 3166 | self.suppress_competing_matchers.get(matcher_id, None) |
|
2824 | 3167 | if isinstance(self.suppress_competing_matchers, dict) |
|
2825 | 3168 | else self.suppress_competing_matchers |
|
2826 | 3169 | ) |
|
2827 | 3170 | should_suppress = ( |
|
2828 | 3171 | (suppression_config is True) |
|
2829 | 3172 | or (suppression_recommended and (suppression_config is not False)) |
|
2830 | 3173 | ) and has_any_completions(result) |
|
2831 | 3174 | |
|
2832 | 3175 | if should_suppress: |
|
2833 |                     suppression_exceptions = result.get("do_not_suppress", set()) |

2834 |                     try: |

3176 | suppression_exceptions: Set[str] = result.get( | |
|
3177 | "do_not_suppress", set() | |
|
3178 | ) | |
|
3179 | if isinstance(suppression_recommended, Iterable): | |
|
2835 | 3180 | to_suppress = set(suppression_recommended) |
|
2836 |                     except TypeError: |

3181 | else: | |
|
2837 | 3182 | to_suppress = set(matchers) |
|
2838 | 3183 | suppressed_matchers = to_suppress - suppression_exceptions |
|
2839 | 3184 | |
|
2840 | 3185 | new_results = {} |
|
2841 | 3186 | for previous_matcher_id, previous_result in results.items(): |
|
2842 | 3187 | if previous_matcher_id not in suppressed_matchers: |
|
2843 | 3188 | new_results[previous_matcher_id] = previous_result |
|
2844 | 3189 | results = new_results |
|
2845 | 3190 | |
|
2846 | 3191 | results[matcher_id] = result |
|
2847 | 3192 | |
|
2848 | 3193 | _, matches = self._arrange_and_extract( |
|
2849 | 3194 | results, |
|
2850 | 3195 |             # TODO Jedi completions not included in legacy stateful API; was this deliberate or an omission? |

2851 | 3196 |             # if it was an omission, we can remove the filtering step, otherwise remove this comment. |
|
2852 | 3197 | skip_matchers={jedi_matcher_id}, |
|
2853 | 3198 | abort_if_offset_changes=False, |
|
2854 | 3199 | ) |
|
2855 | 3200 | |
|
2856 | 3201 | # populate legacy stateful API |
|
2857 | 3202 | self.matches = matches |
|
2858 | 3203 | |
|
2859 | 3204 | return results |
|
2860 | 3205 | |
|
2861 | 3206 | @staticmethod |
|
2862 | 3207 | def _deduplicate( |
|
2863 |         matches: Sequence[SimpleCompletion], |

2864 |     ) -> Iterable[SimpleCompletion]: |

2865 | filtered_matches = {} | |
|
3208 | matches: Sequence[AnyCompletion], | |
|
3209 | ) -> Iterable[AnyCompletion]: | |
|
3210 | filtered_matches: Dict[str, AnyCompletion] = {} | |
|
2866 | 3211 | for match in matches: |
|
2867 | 3212 | text = match.text |
|
2868 | 3213 | if ( |
|
2869 | 3214 | text not in filtered_matches |
|
2870 | 3215 | or filtered_matches[text].type == _UNKNOWN_TYPE |
|
2871 | 3216 | ): |
|
2872 | 3217 | filtered_matches[text] = match |
|
2873 | 3218 | |
|
2874 | 3219 | return filtered_matches.values() |
|
2875 | 3220 | |
|
2876 | 3221 | @staticmethod |
|
2877 |     def _sort(matches: Sequence[SimpleCompletion]): |

3222 | def _sort(matches: Sequence[AnyCompletion]): | |
|
2878 | 3223 | return sorted(matches, key=lambda x: completions_sorting_key(x.text)) |
|
2879 | 3224 | |
|
2880 | 3225 | @context_matcher() |
|
2881 | 3226 | def fwd_unicode_matcher(self, context: CompletionContext): |
|
2882 | 3227 | """Same as :any:`fwd_unicode_match`, but adopted to new Matcher API.""" |
|
2883 | 3228 | # TODO: use `context.limit` to terminate early once we matched the maximum |
|
2884 | 3229 | # number that will be used downstream; can be added as an optional to |
|
2885 | 3230 | # `fwd_unicode_match(text: str, limit: int = None)` or we could re-implement here. |
|
2886 | 3231 | fragment, matches = self.fwd_unicode_match(context.text_until_cursor) |
|
2887 | 3232 | return _convert_matcher_v1_result_to_v2( |
|
2888 | 3233 | matches, type="unicode", fragment=fragment, suppress_if_matches=True |
|
2889 | 3234 | ) |
|
2890 | 3235 | |
|
2891 | 3236 | def fwd_unicode_match(self, text: str) -> Tuple[str, Sequence[str]]: |
|
2892 | 3237 | """ |
|
2893 | 3238 | Forward match a string starting with a backslash with a list of |
|
2894 | 3239 | potential Unicode completions. |
|
2895 | 3240 | |
|
2896 | 3241 | Will compute list of Unicode character names on first call and cache it. |
|
2897 | 3242 | |
|
2898 | 3243 | .. deprecated:: 8.6 |
|
2899 | 3244 | You can use :meth:`fwd_unicode_matcher` instead. |
|
2900 | 3245 | |
|
2901 | 3246 | Returns |
|
2902 | 3247 | ------- |
|
2903 | 3248 |         A tuple with: |
|
2904 | 3249 | - matched text (empty if no matches) |
|
2905 | 3250 |         - list of potential completions (empty tuple otherwise) |
|
2906 | 3251 | """ |
|
2907 | 3252 | # TODO: self.unicode_names is here a list we traverse each time with ~100k elements. |
|
2908 | 3253 | # We could do a faster match using a Trie. |
|
2909 | 3254 | |
|
2910 | 3255 | # Using pygtrie the following seem to work: |
|
2911 | 3256 | |
|
2912 | 3257 | # s = PrefixSet() |
|
2913 | 3258 | |
|
2914 | 3259 | # for c in range(0,0x10FFFF + 1): |
|
2915 | 3260 | # try: |
|
2916 | 3261 | # s.add(unicodedata.name(chr(c))) |
|
2917 | 3262 | # except ValueError: |
|
2918 | 3263 | # pass |
|
2919 | 3264 | # [''.join(k) for k in s.iter(prefix)] |
|
2920 | 3265 | |
|
2921 | 3266 | # But need to be timed and adds an extra dependency. |
|
2922 | 3267 | |
|
2923 | 3268 | slashpos = text.rfind('\\') |
|
2924 | 3269 | # if text starts with slash |
|
2925 | 3270 | if slashpos > -1: |
|
2926 | 3271 | # PERF: It's important that we don't access self._unicode_names |
|
2927 | 3272 | # until we're inside this if-block. _unicode_names is lazily |
|
2928 | 3273 | # initialized, and it takes a user-noticeable amount of time to |
|
2929 | 3274 | # initialize it, so we don't want to initialize it unless we're |
|
2930 | 3275 | # actually going to use it. |
|
2931 | 3276 | s = text[slashpos + 1 :] |
|
2932 | 3277 | sup = s.upper() |
|
2933 | 3278 | candidates = [x for x in self.unicode_names if x.startswith(sup)] |
|
2934 | 3279 | if candidates: |
|
2935 | 3280 | return s, candidates |
|
2936 | 3281 | candidates = [x for x in self.unicode_names if sup in x] |
|
2937 | 3282 | if candidates: |
|
2938 | 3283 | return s, candidates |
|
2939 | 3284 | splitsup = sup.split(" ") |
|
2940 | 3285 | candidates = [ |
|
2941 | 3286 | x for x in self.unicode_names if all(u in x for u in splitsup) |
|
2942 | 3287 | ] |
|
2943 | 3288 | if candidates: |
|
2944 | 3289 | return s, candidates |
|
2945 | 3290 | |
|
2946 | 3291 | return "", () |
|
2947 | 3292 | |
|
2948 | 3293 | # if text does not start with slash |
|
2949 | 3294 | else: |
|
2950 | 3295 | return '', () |
|
2951 | 3296 | |
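The three-stage lookup in fwd_unicode_match above (prefix match, then substring, then all-words) can be reproduced standalone; the sample code-point range and queries below are illustrative only and not part of the patch.

import unicodedata

# Build a small sample of names (fwd_unicode_match uses its full cached list).
names = []
for c in range(0x0370, 0x0400):  # Greek and Coptic block, enough for a demo
    try:
        names.append(unicodedata.name(chr(c)))
    except ValueError:
        pass  # unassigned code point

query = "GREEK SMALL LETTER AL"
print([n for n in names if n.startswith(query)])         # 1) prefix stage
print([n for n in names if "FINAL SIGMA" in n])          # 2) substring stage
words = "SMALL SIGMA".split(" ")
print([n for n in names if all(w in n for w in words)])  # 3) all-words stage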
|
2952 | 3297 | @property |
|
2953 | 3298 | def unicode_names(self) -> List[str]: |
|
2954 | 3299 | """List of names of unicode code points that can be completed. |
|
2955 | 3300 | |
|
2956 | 3301 | The list is lazily initialized on first access. |
|
2957 | 3302 | """ |
|
2958 | 3303 | if self._unicode_names is None: |
|
2959 | 3304 | names = [] |
|
2960 | 3305 | for c in range(0,0x10FFFF + 1): |
|
2961 | 3306 | try: |
|
2962 | 3307 | names.append(unicodedata.name(chr(c))) |
|
2963 | 3308 | except ValueError: |
|
2964 | 3309 | pass |
|
2965 | 3310 | self._unicode_names = _unicode_name_compute(_UNICODE_RANGES) |
|
2966 | 3311 | |
|
2967 | 3312 | return self._unicode_names |
|
2968 | 3313 | |
|
2969 | 3314 | def _unicode_name_compute(ranges:List[Tuple[int,int]]) -> List[str]: |
|
2970 | 3315 | names = [] |
|
2971 | 3316 | for start,stop in ranges: |
|
2972 | 3317 | for c in range(start, stop) : |
|
2973 | 3318 | try: |
|
2974 | 3319 | names.append(unicodedata.name(chr(c))) |
|
2975 | 3320 | except ValueError: |
|
2976 | 3321 | pass |
|
2977 | 3322 | return names |
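The annotations added above (MatcherResult, SimpleCompletion, the Set[str] suppression fields) all describe the provisional v2 matcher API that _complete dispatches to. A minimal sketch of a custom v2 matcher follows, assuming the public names exported by IPython 8.6+ (context_matcher, CompletionContext, SimpleCompletion) and registration through Completer.custom_matchers; the matcher itself is hypothetical.

from IPython.core.completer import CompletionContext, SimpleCompletion, context_matcher

@context_matcher()
def fruit_matcher(context: CompletionContext):
    """Hypothetical matcher: offer one completion for tokens starting with 'fru'."""
    if not context.token.startswith("fru"):
        return {"completions": []}           # empty result; nothing gets suppressed
    return {
        "completions": [SimpleCompletion(text="fruit_basket", type="variable")],
        "suppress": True,                    # ask _complete() to drop competing matchers
        "matched_fragment": context.token,   # same default the dispatch loop applies
    }

# Assumed registration hook: get_ipython().Completer.custom_matchers.append(fruit_matcher)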
@@ -1,3852 +1,3861 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | """Main IPython class.""" |
|
3 | 3 | |
|
4 | 4 | #----------------------------------------------------------------------------- |
|
5 | 5 | # Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> |
|
6 | 6 | # Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu> |
|
7 | 7 | # Copyright (C) 2008-2011 The IPython Development Team |
|
8 | 8 | # |
|
9 | 9 | # Distributed under the terms of the BSD License. The full license is in |
|
10 | 10 | # the file COPYING, distributed as part of this software. |
|
11 | 11 | #----------------------------------------------------------------------------- |
|
12 | 12 | |
|
13 | 13 | |
|
14 | 14 | import abc |
|
15 | 15 | import ast |
|
16 | 16 | import atexit |
|
17 | 17 | import bdb |
|
18 | 18 | import builtins as builtin_mod |
|
19 | 19 | import functools |
|
20 | 20 | import inspect |
|
21 | 21 | import os |
|
22 | 22 | import re |
|
23 | 23 | import runpy |
|
24 | 24 | import subprocess |
|
25 | 25 | import sys |
|
26 | 26 | import tempfile |
|
27 | 27 | import traceback |
|
28 | 28 | import types |
|
29 | 29 | import warnings |
|
30 | 30 | from ast import stmt |
|
31 | 31 | from io import open as io_open |
|
32 | 32 | from logging import error |
|
33 | 33 | from pathlib import Path |
|
34 | 34 | from typing import Callable |
|
35 | 35 | from typing import List as ListType |
|
36 | 36 | from typing import Optional, Tuple |
|
37 | 37 | from warnings import warn |
|
38 | 38 | |
|
39 | 39 | from pickleshare import PickleShareDB |
|
40 | 40 | from tempfile import TemporaryDirectory |
|
41 | 41 | from traitlets import ( |
|
42 | 42 | Any, |
|
43 | 43 | Bool, |
|
44 | 44 | CaselessStrEnum, |
|
45 | 45 | Dict, |
|
46 | 46 | Enum, |
|
47 | 47 | Instance, |
|
48 | 48 | Integer, |
|
49 | 49 | List, |
|
50 | 50 | Type, |
|
51 | 51 | Unicode, |
|
52 | 52 | default, |
|
53 | 53 | observe, |
|
54 | 54 | validate, |
|
55 | 55 | ) |
|
56 | 56 | from traitlets.config.configurable import SingletonConfigurable |
|
57 | 57 | from traitlets.utils.importstring import import_item |
|
58 | 58 | |
|
59 | 59 | import IPython.core.hooks |
|
60 | 60 | from IPython.core import magic, oinspect, page, prefilter, ultratb |
|
61 | 61 | from IPython.core.alias import Alias, AliasManager |
|
62 | 62 | from IPython.core.autocall import ExitAutocall |
|
63 | 63 | from IPython.core.builtin_trap import BuiltinTrap |
|
64 | 64 | from IPython.core.compilerop import CachingCompiler |
|
65 | 65 | from IPython.core.debugger import InterruptiblePdb |
|
66 | 66 | from IPython.core.display_trap import DisplayTrap |
|
67 | 67 | from IPython.core.displayhook import DisplayHook |
|
68 | 68 | from IPython.core.displaypub import DisplayPublisher |
|
69 | 69 | from IPython.core.error import InputRejected, UsageError |
|
70 | 70 | from IPython.core.events import EventManager, available_events |
|
71 | 71 | from IPython.core.extensions import ExtensionManager |
|
72 | 72 | from IPython.core.formatters import DisplayFormatter |
|
73 | 73 | from IPython.core.history import HistoryManager |
|
74 | 74 | from IPython.core.inputtransformer2 import ESC_MAGIC, ESC_MAGIC2 |
|
75 | 75 | from IPython.core.logger import Logger |
|
76 | 76 | from IPython.core.macro import Macro |
|
77 | 77 | from IPython.core.payload import PayloadManager |
|
78 | 78 | from IPython.core.prefilter import PrefilterManager |
|
79 | 79 | from IPython.core.profiledir import ProfileDir |
|
80 | 80 | from IPython.core.usage import default_banner |
|
81 | 81 | from IPython.display import display |
|
82 | 82 | from IPython.paths import get_ipython_dir |
|
83 | 83 | from IPython.testing.skipdoctest import skip_doctest |
|
84 | 84 | from IPython.utils import PyColorize, io, openpy, py3compat |
|
85 | 85 | from IPython.utils.decorators import undoc |
|
86 | 86 | from IPython.utils.io import ask_yes_no |
|
87 | 87 | from IPython.utils.ipstruct import Struct |
|
88 | 88 | from IPython.utils.path import ensure_dir_exists, get_home_dir, get_py_filename |
|
89 | 89 | from IPython.utils.process import getoutput, system |
|
90 | 90 | from IPython.utils.strdispatch import StrDispatch |
|
91 | 91 | from IPython.utils.syspathcontext import prepended_to_syspath |
|
92 | 92 | from IPython.utils.text import DollarFormatter, LSString, SList, format_screen |
|
93 | 93 | |
|
94 | 94 | sphinxify: Optional[Callable] |
|
95 | 95 | |
|
96 | 96 | try: |
|
97 | 97 | import docrepr.sphinxify as sphx |
|
98 | 98 | |
|
99 | 99 | def sphinxify(oinfo): |
|
100 | 100 | wrapped_docstring = sphx.wrap_main_docstring(oinfo) |
|
101 | 101 | |
|
102 | 102 | def sphinxify_docstring(docstring): |
|
103 | 103 | with TemporaryDirectory() as dirname: |
|
104 | 104 | return { |
|
105 | 105 | "text/html": sphx.sphinxify(wrapped_docstring, dirname), |
|
106 | 106 | "text/plain": docstring, |
|
107 | 107 | } |
|
108 | 108 | |
|
109 | 109 | return sphinxify_docstring |
|
110 | 110 | except ImportError: |
|
111 | 111 | sphinxify = None |
|
112 | 112 | |
|
113 | 113 | |
|
114 | 114 | class ProvisionalWarning(DeprecationWarning): |
|
115 | 115 | """ |
|
116 | 116 | Warning class for unstable features |
|
117 | 117 | """ |
|
118 | 118 | pass |
|
119 | 119 | |
|
120 | 120 | from ast import Module |
|
121 | 121 | |
|
122 | 122 | _assign_nodes = (ast.AugAssign, ast.AnnAssign, ast.Assign) |
|
123 | 123 | _single_targets_nodes = (ast.AugAssign, ast.AnnAssign) |
|
124 | 124 | |
|
125 | 125 | #----------------------------------------------------------------------------- |
|
126 | 126 | # Await Helpers |
|
127 | 127 | #----------------------------------------------------------------------------- |
|
128 | 128 | |
|
129 | 129 | # we still need to run things using the asyncio eventloop, but there is no |
|
130 | 130 | # async integration |
|
131 | 131 | from .async_helpers import ( |
|
132 | 132 | _asyncio_runner, |
|
133 | 133 | _curio_runner, |
|
134 | 134 | _pseudo_sync_runner, |
|
135 | 135 | _should_be_async, |
|
136 | 136 | _trio_runner, |
|
137 | 137 | ) |
|
138 | 138 | |
|
139 | 139 | #----------------------------------------------------------------------------- |
|
140 | 140 | # Globals |
|
141 | 141 | #----------------------------------------------------------------------------- |
|
142 | 142 | |
|
143 | 143 | # compiled regexps for autoindent management |
|
144 | 144 | dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass') |
|
145 | 145 | |
|
146 | 146 | #----------------------------------------------------------------------------- |
|
147 | 147 | # Utilities |
|
148 | 148 | #----------------------------------------------------------------------------- |
|
149 | 149 | |
|
150 | 150 | |
|
151 | 151 | def is_integer_string(s: str): |
|
152 | 152 | """ |
|
153 | 153 |     Variant of "str.isnumeric()" that allows negative values and other ints. |
|
154 | 154 | """ |
|
155 | 155 | try: |
|
156 | 156 | int(s) |
|
157 | 157 | return True |
|
158 | 158 | except ValueError: |
|
159 | 159 | return False |
|
160 | 160 | raise ValueError("Unexpected error") |
|
161 | 161 | |
|
162 | 162 | |
|
163 | 163 | @undoc |
|
164 | 164 | def softspace(file, newvalue): |
|
165 | 165 | """Copied from code.py, to remove the dependency""" |
|
166 | 166 | |
|
167 | 167 | oldvalue = 0 |
|
168 | 168 | try: |
|
169 | 169 | oldvalue = file.softspace |
|
170 | 170 | except AttributeError: |
|
171 | 171 | pass |
|
172 | 172 | try: |
|
173 | 173 | file.softspace = newvalue |
|
174 | 174 | except (AttributeError, TypeError): |
|
175 | 175 | # "attribute-less object" or "read-only attributes" |
|
176 | 176 | pass |
|
177 | 177 | return oldvalue |
|
178 | 178 | |
|
179 | 179 | @undoc |
|
180 | 180 | def no_op(*a, **kw): |
|
181 | 181 | pass |
|
182 | 182 | |
|
183 | 183 | |
|
184 | 184 | class SpaceInInput(Exception): pass |
|
185 | 185 | |
|
186 | 186 | |
|
187 | 187 | class SeparateUnicode(Unicode): |
|
188 | 188 | r"""A Unicode subclass to validate separate_in, separate_out, etc. |
|
189 | 189 | |
|
190 | 190 | This is a Unicode based trait that converts '0'->'' and ``'\\n'->'\n'``. |
|
191 | 191 | """ |
|
192 | 192 | |
|
193 | 193 | def validate(self, obj, value): |
|
194 | 194 | if value == '0': value = '' |
|
195 | 195 | value = value.replace('\\n','\n') |
|
196 | 196 | return super(SeparateUnicode, self).validate(obj, value) |
|
197 | 197 | |
|
198 | 198 | |
|
199 | 199 | @undoc |
|
200 | 200 | class DummyMod(object): |
|
201 | 201 | """A dummy module used for IPython's interactive module when |
|
202 | 202 | a namespace must be assigned to the module's __dict__.""" |
|
203 | 203 | __spec__ = None |
|
204 | 204 | |
|
205 | 205 | |
|
206 | 206 | class ExecutionInfo(object): |
|
207 | 207 | """The arguments used for a call to :meth:`InteractiveShell.run_cell` |
|
208 | 208 | |
|
209 | 209 | Stores information about what is going to happen. |
|
210 | 210 | """ |
|
211 | 211 | raw_cell = None |
|
212 | 212 | store_history = False |
|
213 | 213 | silent = False |
|
214 | 214 | shell_futures = True |
|
215 | 215 | cell_id = None |
|
216 | 216 | |
|
217 | 217 | def __init__(self, raw_cell, store_history, silent, shell_futures, cell_id): |
|
218 | 218 | self.raw_cell = raw_cell |
|
219 | 219 | self.store_history = store_history |
|
220 | 220 | self.silent = silent |
|
221 | 221 | self.shell_futures = shell_futures |
|
222 | 222 | self.cell_id = cell_id |
|
223 | 223 | |
|
224 | 224 | def __repr__(self): |
|
225 | 225 | name = self.__class__.__qualname__ |
|
226 | 226 | raw_cell = ( |
|
227 | 227 | (self.raw_cell[:50] + "..") if len(self.raw_cell) > 50 else self.raw_cell |
|
228 | 228 | ) |
|
229 | 229 | return ( |
|
230 | 230 | '<%s object at %x, raw_cell="%s" store_history=%s silent=%s shell_futures=%s cell_id=%s>' |
|
231 | 231 | % ( |
|
232 | 232 | name, |
|
233 | 233 | id(self), |
|
234 | 234 | raw_cell, |
|
235 | 235 | self.store_history, |
|
236 | 236 | self.silent, |
|
237 | 237 | self.shell_futures, |
|
238 | 238 | self.cell_id, |
|
239 | 239 | ) |
|
240 | 240 | ) |
|
241 | 241 | |
|
242 | 242 | |
|
243 | 243 | class ExecutionResult(object): |
|
244 | 244 | """The result of a call to :meth:`InteractiveShell.run_cell` |
|
245 | 245 | |
|
246 | 246 | Stores information about what took place. |
|
247 | 247 | """ |
|
248 | 248 | execution_count = None |
|
249 | 249 | error_before_exec = None |
|
250 | 250 | error_in_exec: Optional[BaseException] = None |
|
251 | 251 | info = None |
|
252 | 252 | result = None |
|
253 | 253 | |
|
254 | 254 | def __init__(self, info): |
|
255 | 255 | self.info = info |
|
256 | 256 | |
|
257 | 257 | @property |
|
258 | 258 | def success(self): |
|
259 | 259 | return (self.error_before_exec is None) and (self.error_in_exec is None) |
|
260 | 260 | |
|
261 | 261 | def raise_error(self): |
|
262 | 262 | """Reraises error if `success` is `False`, otherwise does nothing""" |
|
263 | 263 | if self.error_before_exec is not None: |
|
264 | 264 | raise self.error_before_exec |
|
265 | 265 | if self.error_in_exec is not None: |
|
266 | 266 | raise self.error_in_exec |
|
267 | 267 | |
|
268 | 268 | def __repr__(self): |
|
269 | 269 | name = self.__class__.__qualname__ |
|
270 | 270 | return '<%s object at %x, execution_count=%s error_before_exec=%s error_in_exec=%s info=%s result=%s>' %\ |
|
271 | 271 | (name, id(self), self.execution_count, self.error_before_exec, self.error_in_exec, repr(self.info), repr(self.result)) |
|
272 | 272 | |
|
273 | 273 | @functools.wraps(io_open) |
|
274 | 274 | def _modified_open(file, *args, **kwargs): |
|
275 | 275 | if file in {0, 1, 2}: |
|
276 | 276 | raise ValueError( |
|
277 | 277 | f"IPython won't let you open fd={file} by default " |
|
278 | 278 | "as it is likely to crash IPython. If you know what you are doing, " |
|
279 | 279 | "you can use builtins' open." |
|
280 | 280 | ) |
|
281 | 281 | |
|
282 | 282 | return io_open(file, *args, **kwargs) |
|
283 | 283 | |
|
284 | 284 | class InteractiveShell(SingletonConfigurable): |
|
285 | 285 | """An enhanced, interactive shell for Python.""" |
|
286 | 286 | |
|
287 | 287 | _instance = None |
|
288 | 288 | |
|
289 | 289 | ast_transformers = List([], help= |
|
290 | 290 | """ |
|
291 | 291 | A list of ast.NodeTransformer subclass instances, which will be applied |
|
292 | 292 | to user input before code is run. |
|
293 | 293 | """ |
|
294 | 294 | ).tag(config=True) |
|
295 | 295 | |
|
296 | 296 | autocall = Enum((0,1,2), default_value=0, help= |
|
297 | 297 | """ |
|
298 | 298 | Make IPython automatically call any callable object even if you didn't |
|
299 | 299 | type explicit parentheses. For example, 'str 43' becomes 'str(43)' |
|
300 | 300 | automatically. The value can be '0' to disable the feature, '1' for |
|
301 | 301 | 'smart' autocall, where it is not applied if there are no more |
|
302 | 302 | arguments on the line, and '2' for 'full' autocall, where all callable |
|
303 | 303 | objects are automatically called (even if no arguments are present). |
|
304 | 304 | """ |
|
305 | 305 | ).tag(config=True) |
|
306 | 306 | |
|
307 | 307 | autoindent = Bool(True, help= |
|
308 | 308 | """ |
|
309 | 309 | Autoindent IPython code entered interactively. |
|
310 | 310 | """ |
|
311 | 311 | ).tag(config=True) |
|
312 | 312 | |
|
313 | 313 | autoawait = Bool(True, help= |
|
314 | 314 | """ |
|
315 | 315 | Automatically run await statement in the top level repl. |
|
316 | 316 | """ |
|
317 | 317 | ).tag(config=True) |
|
318 | 318 | |
|
319 | 319 | loop_runner_map ={ |
|
320 | 320 | 'asyncio':(_asyncio_runner, True), |
|
321 | 321 | 'curio':(_curio_runner, True), |
|
322 | 322 | 'trio':(_trio_runner, True), |
|
323 | 323 | 'sync': (_pseudo_sync_runner, False) |
|
324 | 324 | } |
|
325 | 325 | |
|
326 | 326 | loop_runner = Any(default_value="IPython.core.interactiveshell._asyncio_runner", |
|
327 | 327 | allow_none=True, |
|
328 | 328 | help="""Select the loop runner that will be used to execute top-level asynchronous code""" |
|
329 | 329 | ).tag(config=True) |
|
330 | 330 | |
|
331 | 331 | @default('loop_runner') |
|
332 | 332 | def _default_loop_runner(self): |
|
333 | 333 | return import_item("IPython.core.interactiveshell._asyncio_runner") |
|
334 | 334 | |
|
335 | 335 | @validate('loop_runner') |
|
336 | 336 | def _import_runner(self, proposal): |
|
337 | 337 | if isinstance(proposal.value, str): |
|
338 | 338 | if proposal.value in self.loop_runner_map: |
|
339 | 339 | runner, autoawait = self.loop_runner_map[proposal.value] |
|
340 | 340 | self.autoawait = autoawait |
|
341 | 341 | return runner |
|
342 | 342 | runner = import_item(proposal.value) |
|
343 | 343 | if not callable(runner): |
|
344 | 344 | raise ValueError('loop_runner must be callable') |
|
345 | 345 | return runner |
|
346 | 346 | if not callable(proposal.value): |
|
347 | 347 | raise ValueError('loop_runner must be callable') |
|
348 | 348 | return proposal.value |
|
349 | 349 | |
|
350 | 350 | automagic = Bool(True, help= |
|
351 | 351 | """ |
|
352 | 352 | Enable magic commands to be called without the leading %. |
|
353 | 353 | """ |
|
354 | 354 | ).tag(config=True) |
|
355 | 355 | |
|
356 | 356 | banner1 = Unicode(default_banner, |
|
357 | 357 | help="""The part of the banner to be printed before the profile""" |
|
358 | 358 | ).tag(config=True) |
|
359 | 359 | banner2 = Unicode('', |
|
360 | 360 | help="""The part of the banner to be printed after the profile""" |
|
361 | 361 | ).tag(config=True) |
|
362 | 362 | |
|
363 | 363 | cache_size = Integer(1000, help= |
|
364 | 364 | """ |
|
365 | 365 | Set the size of the output cache. The default is 1000, you can |
|
366 | 366 | change it permanently in your config file. Setting it to 0 completely |
|
367 | 367 | disables the caching system, and the minimum value accepted is 3 (if |
|
368 | 368 | you provide a value less than 3, it is reset to 0 and a warning is |
|
369 | 369 | issued). This limit is defined because otherwise you'll spend more |
|
370 | 370 | time re-flushing a too small cache than working |
|
371 | 371 | """ |
|
372 | 372 | ).tag(config=True) |
|
373 | 373 | color_info = Bool(True, help= |
|
374 | 374 | """ |
|
375 | 375 | Use colors for displaying information about objects. Because this |
|
376 | 376 | information is passed through a pager (like 'less'), and some pagers |
|
377 | 377 | get confused with color codes, this capability can be turned off. |
|
378 | 378 | """ |
|
379 | 379 | ).tag(config=True) |
|
380 | 380 | colors = CaselessStrEnum(('Neutral', 'NoColor','LightBG','Linux'), |
|
381 | 381 | default_value='Neutral', |
|
382 | 382 | help="Set the color scheme (NoColor, Neutral, Linux, or LightBG)." |
|
383 | 383 | ).tag(config=True) |
|
384 | 384 | debug = Bool(False).tag(config=True) |
|
385 | 385 | disable_failing_post_execute = Bool(False, |
|
386 | 386 | help="Don't call post-execute functions that have failed in the past." |
|
387 | 387 | ).tag(config=True) |
|
388 | 388 | display_formatter = Instance(DisplayFormatter, allow_none=True) |
|
389 | 389 | displayhook_class = Type(DisplayHook) |
|
390 | 390 | display_pub_class = Type(DisplayPublisher) |
|
391 | 391 | compiler_class = Type(CachingCompiler) |
|
392 | inspector_class = Type( | |
|
393 | oinspect.Inspector, help="Class to use to instantiate the shell inspector" | |
|
394 | ).tag(config=True) | |
|
392 | 395 | |
|
393 | 396 | sphinxify_docstring = Bool(False, help= |
|
394 | 397 | """ |
|
395 | 398 | Enables rich html representation of docstrings. (This requires the |
|
396 | 399 | docrepr module). |
|
397 | 400 | """).tag(config=True) |
|
398 | 401 | |
|
399 | 402 | @observe("sphinxify_docstring") |
|
400 | 403 | def _sphinxify_docstring_changed(self, change): |
|
401 | 404 | if change['new']: |
|
402 | 405 | warn("`sphinxify_docstring` is provisional since IPython 5.0 and might change in future versions." , ProvisionalWarning) |
|
403 | 406 | |
|
404 | 407 | enable_html_pager = Bool(False, help= |
|
405 | 408 | """ |
|
406 | 409 | (Provisional API) enables html representation in mime bundles sent |
|
407 | 410 | to pagers. |
|
408 | 411 | """).tag(config=True) |
|
409 | 412 | |
|
410 | 413 | @observe("enable_html_pager") |
|
411 | 414 | def _enable_html_pager_changed(self, change): |
|
412 | 415 | if change['new']: |
|
413 | 416 | warn("`enable_html_pager` is provisional since IPython 5.0 and might change in future versions.", ProvisionalWarning) |
|
414 | 417 | |
|
415 | 418 | data_pub_class = None |
|
416 | 419 | |
|
417 | 420 | exit_now = Bool(False) |
|
418 | 421 | exiter = Instance(ExitAutocall) |
|
419 | 422 | @default('exiter') |
|
420 | 423 | def _exiter_default(self): |
|
421 | 424 | return ExitAutocall(self) |
|
422 | 425 | # Monotonically increasing execution counter |
|
423 | 426 | execution_count = Integer(1) |
|
424 | 427 | filename = Unicode("<ipython console>") |
|
425 | 428 | ipython_dir= Unicode('').tag(config=True) # Set to get_ipython_dir() in __init__ |
|
426 | 429 | |
|
427 | 430 | # Used to transform cells before running them, and check whether code is complete |
|
428 | 431 | input_transformer_manager = Instance('IPython.core.inputtransformer2.TransformerManager', |
|
429 | 432 | ()) |
|
430 | 433 | |
|
431 | 434 | @property |
|
432 | 435 | def input_transformers_cleanup(self): |
|
433 | 436 | return self.input_transformer_manager.cleanup_transforms |
|
434 | 437 | |
|
435 | 438 | input_transformers_post = List([], |
|
436 | 439 | help="A list of string input transformers, to be applied after IPython's " |
|
437 | 440 | "own input transformations." |
|
438 | 441 | ) |
|
439 | 442 | |
|
440 | 443 | @property |
|
441 | 444 | def input_splitter(self): |
|
442 | 445 | """Make this available for backward compatibility (pre-7.0 release) with existing code. |
|
443 | 446 | |
|
444 | 447 |         For example, ipykernel currently uses |
|
445 | 448 | `shell.input_splitter.check_complete` |
|
446 | 449 | """ |
|
447 | 450 | from warnings import warn |
|
448 | 451 | warn("`input_splitter` is deprecated since IPython 7.0, prefer `input_transformer_manager`.", |
|
449 | 452 | DeprecationWarning, stacklevel=2 |
|
450 | 453 | ) |
|
451 | 454 | return self.input_transformer_manager |
|
452 | 455 | |
|
453 | 456 | logstart = Bool(False, help= |
|
454 | 457 | """ |
|
455 | 458 | Start logging to the default log file in overwrite mode. |
|
456 | 459 | Use `logappend` to specify a log file to **append** logs to. |
|
457 | 460 | """ |
|
458 | 461 | ).tag(config=True) |
|
459 | 462 | logfile = Unicode('', help= |
|
460 | 463 | """ |
|
461 | 464 | The name of the logfile to use. |
|
462 | 465 | """ |
|
463 | 466 | ).tag(config=True) |
|
464 | 467 | logappend = Unicode('', help= |
|
465 | 468 | """ |
|
466 | 469 | Start logging to the given file in append mode. |
|
467 | 470 | Use `logfile` to specify a log file to **overwrite** logs to. |
|
468 | 471 | """ |
|
469 | 472 | ).tag(config=True) |
|
470 | 473 | object_info_string_level = Enum((0,1,2), default_value=0, |
|
471 | 474 | ).tag(config=True) |
|
472 | 475 | pdb = Bool(False, help= |
|
473 | 476 | """ |
|
474 | 477 | Automatically call the pdb debugger after every exception. |
|
475 | 478 | """ |
|
476 | 479 | ).tag(config=True) |
|
477 | 480 | display_page = Bool(False, |
|
478 | 481 | help="""If True, anything that would be passed to the pager |
|
479 | 482 | will be displayed as regular output instead.""" |
|
480 | 483 | ).tag(config=True) |
|
481 | 484 | |
|
482 | 485 | |
|
483 | 486 | show_rewritten_input = Bool(True, |
|
484 | 487 | help="Show rewritten input, e.g. for autocall." |
|
485 | 488 | ).tag(config=True) |
|
486 | 489 | |
|
487 | 490 | quiet = Bool(False).tag(config=True) |
|
488 | 491 | |
|
489 | 492 | history_length = Integer(10000, |
|
490 | 493 | help='Total length of command history' |
|
491 | 494 | ).tag(config=True) |
|
492 | 495 | |
|
493 | 496 | history_load_length = Integer(1000, help= |
|
494 | 497 | """ |
|
495 | 498 | The number of saved history entries to be loaded |
|
496 | 499 | into the history buffer at startup. |
|
497 | 500 | """ |
|
498 | 501 | ).tag(config=True) |
|
499 | 502 | |
|
500 | 503 | ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none', 'last_expr_or_assign'], |
|
501 | 504 | default_value='last_expr', |
|
502 | 505 | help=""" |
|
503 | 506 | 'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying |
|
504 | 507 | which nodes should be run interactively (displaying output from expressions). |
|
505 | 508 | """ |
|
506 | 509 | ).tag(config=True) |
|
507 | 510 | |
|
508 | 511 | warn_venv = Bool( |
|
509 | 512 | True, |
|
510 | 513 | help="Warn if running in a virtual environment with no IPython installed (so IPython from the global environment is used).", |
|
511 | 514 | ).tag(config=True) |
|
512 | 515 | |
|
513 | 516 | # TODO: this part of prompt management should be moved to the frontends. |
|
514 | 517 | # Use custom TraitTypes that convert '0'->'' and '\\n'->'\n' |
|
515 | 518 | separate_in = SeparateUnicode('\n').tag(config=True) |
|
516 | 519 | separate_out = SeparateUnicode('').tag(config=True) |
|
517 | 520 | separate_out2 = SeparateUnicode('').tag(config=True) |
|
518 | 521 | wildcards_case_sensitive = Bool(True).tag(config=True) |
|
519 | 522 | xmode = CaselessStrEnum(('Context', 'Plain', 'Verbose', 'Minimal'), |
|
520 | 523 | default_value='Context', |
|
521 | 524 | help="Switch modes for the IPython exception handlers." |
|
522 | 525 | ).tag(config=True) |
|
523 | 526 | |
|
524 | 527 | # Subcomponents of InteractiveShell |
|
525 | 528 | alias_manager = Instance('IPython.core.alias.AliasManager', allow_none=True) |
|
526 | 529 | prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True) |
|
527 | 530 | builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap', allow_none=True) |
|
528 | 531 | display_trap = Instance('IPython.core.display_trap.DisplayTrap', allow_none=True) |
|
529 | 532 | extension_manager = Instance('IPython.core.extensions.ExtensionManager', allow_none=True) |
|
530 | 533 | payload_manager = Instance('IPython.core.payload.PayloadManager', allow_none=True) |
|
531 | 534 | history_manager = Instance('IPython.core.history.HistoryAccessorBase', allow_none=True) |
|
532 | 535 | magics_manager = Instance('IPython.core.magic.MagicsManager', allow_none=True) |
|
533 | 536 | |
|
534 | 537 | profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True) |
|
535 | 538 | @property |
|
536 | 539 | def profile(self): |
|
537 | 540 | if self.profile_dir is not None: |
|
538 | 541 | name = os.path.basename(self.profile_dir.location) |
|
539 | 542 | return name.replace('profile_','') |
|
540 | 543 | |
|
541 | 544 | |
|
542 | 545 | # Private interface |
|
543 | 546 | _post_execute = Dict() |
|
544 | 547 | |
|
545 | 548 | # Tracks any GUI loop loaded for pylab |
|
546 | 549 | pylab_gui_select = None |
|
547 | 550 | |
|
548 | 551 | last_execution_succeeded = Bool(True, help='Did last executed command succeeded') |
|
549 | 552 | |
|
550 | 553 | last_execution_result = Instance('IPython.core.interactiveshell.ExecutionResult', help='Result of executing the last command', allow_none=True) |
|
551 | 554 | |
|
552 | 555 | def __init__(self, ipython_dir=None, profile_dir=None, |
|
553 | 556 | user_module=None, user_ns=None, |
|
554 | 557 | custom_exceptions=((), None), **kwargs): |
|
555 | 558 | # This is where traits with a config_key argument are updated |
|
556 | 559 | # from the values on config. |
|
557 | 560 | super(InteractiveShell, self).__init__(**kwargs) |
|
558 | 561 | if 'PromptManager' in self.config: |
|
559 | 562 | warn('As of IPython 5.0 `PromptManager` config will have no effect' |
|
560 | 563 | ' and has been replaced by TerminalInteractiveShell.prompts_class') |
|
561 | 564 | self.configurables = [self] |
|
562 | 565 | |
|
563 | 566 | # These are relatively independent and stateless |
|
564 | 567 | self.init_ipython_dir(ipython_dir) |
|
565 | 568 | self.init_profile_dir(profile_dir) |
|
566 | 569 | self.init_instance_attrs() |
|
567 | 570 | self.init_environment() |
|
568 | 571 | |
|
569 | 572 | # Check if we're in a virtualenv, and set up sys.path. |
|
570 | 573 | self.init_virtualenv() |
|
571 | 574 | |
|
572 | 575 | # Create namespaces (user_ns, user_global_ns, etc.) |
|
573 | 576 | self.init_create_namespaces(user_module, user_ns) |
|
574 | 577 | # This has to be done after init_create_namespaces because it uses |
|
575 | 578 | # something in self.user_ns, but before init_sys_modules, which |
|
576 | 579 | # is the first thing to modify sys. |
|
577 | 580 | # TODO: When we override sys.stdout and sys.stderr before this class |
|
578 | 581 | # is created, we are saving the overridden ones here. Not sure if this |
|
579 | 582 | # is what we want to do. |
|
580 | 583 | self.save_sys_module_state() |
|
581 | 584 | self.init_sys_modules() |
|
582 | 585 | |
|
583 | 586 | # While we're trying to have each part of the code directly access what |
|
584 | 587 | # it needs without keeping redundant references to objects, we have too |
|
585 | 588 | # much legacy code that expects ip.db to exist. |
|
586 | 589 | self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db')) |
|
587 | 590 | |
|
588 | 591 | self.init_history() |
|
589 | 592 | self.init_encoding() |
|
590 | 593 | self.init_prefilter() |
|
591 | 594 | |
|
592 | 595 | self.init_syntax_highlighting() |
|
593 | 596 | self.init_hooks() |
|
594 | 597 | self.init_events() |
|
595 | 598 | self.init_pushd_popd_magic() |
|
596 | 599 | self.init_user_ns() |
|
597 | 600 | self.init_logger() |
|
598 | 601 | self.init_builtins() |
|
599 | 602 | |
|
600 | 603 | # The following was in post_config_initialization |
|
601 | 604 | self.init_inspector() |
|
602 | 605 | self.raw_input_original = input |
|
603 | 606 | self.init_completer() |
|
604 | 607 | # TODO: init_io() needs to happen before init_traceback handlers |
|
605 | 608 | # because the traceback handlers hardcode the stdout/stderr streams. |
|
606 | 609 |         # This logic is in debugger.Pdb and should eventually be changed. |
|
607 | 610 | self.init_io() |
|
608 | 611 | self.init_traceback_handlers(custom_exceptions) |
|
609 | 612 | self.init_prompts() |
|
610 | 613 | self.init_display_formatter() |
|
611 | 614 | self.init_display_pub() |
|
612 | 615 | self.init_data_pub() |
|
613 | 616 | self.init_displayhook() |
|
614 | 617 | self.init_magics() |
|
615 | 618 | self.init_alias() |
|
616 | 619 | self.init_logstart() |
|
617 | 620 | self.init_pdb() |
|
618 | 621 | self.init_extension_manager() |
|
619 | 622 | self.init_payload() |
|
620 | 623 | self.events.trigger('shell_initialized', self) |
|
621 | 624 | atexit.register(self.atexit_operations) |
|
622 | 625 | |
|
623 | 626 | # The trio runner is used for running Trio in the foreground thread. It |
|
624 | 627 | # is different from `_trio_runner(async_fn)` in `async_helpers.py` |
|
625 | 628 | # which calls `trio.run()` for every cell. This runner runs all cells |
|
626 | 629 | # inside a single Trio event loop. If used, it is set from |
|
627 | 630 | # `ipykernel.kernelapp`. |
|
628 | 631 | self.trio_runner = None |
|
629 | 632 | |
|
630 | 633 | def get_ipython(self): |
|
631 | 634 | """Return the currently running IPython instance.""" |
|
632 | 635 | return self |
|
633 | 636 | |
|
634 | 637 | #------------------------------------------------------------------------- |
|
635 | 638 | # Trait changed handlers |
|
636 | 639 | #------------------------------------------------------------------------- |
|
637 | 640 | @observe('ipython_dir') |
|
638 | 641 | def _ipython_dir_changed(self, change): |
|
639 | 642 | ensure_dir_exists(change['new']) |
|
640 | 643 | |
|
641 | 644 | def set_autoindent(self,value=None): |
|
642 | 645 | """Set the autoindent flag. |
|
643 | 646 | |
|
644 | 647 | If called with no arguments, it acts as a toggle.""" |
|
645 | 648 | if value is None: |
|
646 | 649 | self.autoindent = not self.autoindent |
|
647 | 650 | else: |
|
648 | 651 | self.autoindent = value |
|
649 | 652 | |
|
650 | 653 | def set_trio_runner(self, tr): |
|
651 | 654 | self.trio_runner = tr |
|
652 | 655 | |
|
653 | 656 | #------------------------------------------------------------------------- |
|
654 | 657 | # init_* methods called by __init__ |
|
655 | 658 | #------------------------------------------------------------------------- |
|
656 | 659 | |
|
657 | 660 | def init_ipython_dir(self, ipython_dir): |
|
658 | 661 | if ipython_dir is not None: |
|
659 | 662 | self.ipython_dir = ipython_dir |
|
660 | 663 | return |
|
661 | 664 | |
|
662 | 665 | self.ipython_dir = get_ipython_dir() |
|
663 | 666 | |
|
664 | 667 | def init_profile_dir(self, profile_dir): |
|
665 | 668 | if profile_dir is not None: |
|
666 | 669 | self.profile_dir = profile_dir |
|
667 | 670 | return |
|
668 | 671 | self.profile_dir = ProfileDir.create_profile_dir_by_name( |
|
669 | 672 | self.ipython_dir, "default" |
|
670 | 673 | ) |
|
671 | 674 | |
|
672 | 675 | def init_instance_attrs(self): |
|
673 | 676 | self.more = False |
|
674 | 677 | |
|
675 | 678 | # command compiler |
|
676 | 679 | self.compile = self.compiler_class() |
|
677 | 680 | |
|
678 | 681 | # Make an empty namespace, which extension writers can rely on both |
|
679 | 682 | # existing and NEVER being used by ipython itself. This gives them a |
|
680 | 683 | # convenient location for storing additional information and state |
|
681 | 684 | # their extensions may require, without fear of collisions with other |
|
682 | 685 | # ipython names that may develop later. |
|
683 | 686 | self.meta = Struct() |
|
684 | 687 | |
|
685 | 688 | # Temporary files used for various purposes. Deleted at exit. |
|
686 | 689 | # The files here are stored with Path from Pathlib |
|
687 | 690 | self.tempfiles = [] |
|
688 | 691 | self.tempdirs = [] |
|
689 | 692 | |
|
690 | 693 | # keep track of where we started running (mainly for crash post-mortem) |
|
691 | 694 | # This is not being used anywhere currently. |
|
692 | 695 | self.starting_dir = os.getcwd() |
|
693 | 696 | |
|
694 | 697 | # Indentation management |
|
695 | 698 | self.indent_current_nsp = 0 |
|
696 | 699 | |
|
697 | 700 | # Dict to track post-execution functions that have been registered |
|
698 | 701 | self._post_execute = {} |
|
699 | 702 | |
|
700 | 703 | def init_environment(self): |
|
701 | 704 | """Any changes we need to make to the user's environment.""" |
|
702 | 705 | pass |
|
703 | 706 | |
|
704 | 707 | def init_encoding(self): |
|
705 | 708 | # Get system encoding at startup time. Certain terminals (like Emacs |
|
706 | 709 |         # under Win32) have it set to None, and we need to have a known valid |
|
707 | 710 | # encoding to use in the raw_input() method |
|
708 | 711 | try: |
|
709 | 712 | self.stdin_encoding = sys.stdin.encoding or 'ascii' |
|
710 | 713 | except AttributeError: |
|
711 | 714 | self.stdin_encoding = 'ascii' |
|
712 | 715 | |
|
713 | 716 | |
|
714 | 717 | @observe('colors') |
|
715 | 718 | def init_syntax_highlighting(self, changes=None): |
|
716 | 719 | # Python source parser/formatter for syntax highlighting |
|
717 | 720 | pyformat = PyColorize.Parser(style=self.colors, parent=self).format |
|
718 | 721 | self.pycolorize = lambda src: pyformat(src,'str') |
|
719 | 722 | |
|
720 | 723 | def refresh_style(self): |
|
721 | 724 | # No-op here, used in subclass |
|
722 | 725 | pass |
|
723 | 726 | |
|
724 | 727 | def init_pushd_popd_magic(self): |
|
725 | 728 | # for pushd/popd management |
|
726 | 729 | self.home_dir = get_home_dir() |
|
727 | 730 | |
|
728 | 731 | self.dir_stack = [] |
|
729 | 732 | |
|
730 | 733 | def init_logger(self): |
|
731 | 734 | self.logger = Logger(self.home_dir, logfname='ipython_log.py', |
|
732 | 735 | logmode='rotate') |
|
733 | 736 | |
|
734 | 737 | def init_logstart(self): |
|
735 | 738 | """Initialize logging in case it was requested at the command line. |
|
736 | 739 | """ |
|
737 | 740 | if self.logappend: |
|
738 | 741 | self.magic('logstart %s append' % self.logappend) |
|
739 | 742 | elif self.logfile: |
|
740 | 743 | self.magic('logstart %s' % self.logfile) |
|
741 | 744 | elif self.logstart: |
|
742 | 745 | self.magic('logstart') |
|
743 | 746 | |
|
744 | 747 | |
|
745 | 748 | def init_builtins(self): |
|
746 | 749 | # A single, static flag that we set to True. Its presence indicates |
|
747 | 750 | # that an IPython shell has been created, and we make no attempts at |
|
748 | 751 | # removing on exit or representing the existence of more than one |
|
749 | 752 | # IPython at a time. |
|
750 | 753 | builtin_mod.__dict__['__IPYTHON__'] = True |
|
751 | 754 | builtin_mod.__dict__['display'] = display |
|
752 | 755 | |
|
753 | 756 | self.builtin_trap = BuiltinTrap(shell=self) |
|
754 | 757 | |
|
755 | 758 | @observe('colors') |
|
756 | 759 | def init_inspector(self, changes=None): |
|
757 | 760 | # Object inspector |
|
758 |         self.inspector = oinspect.Inspector(oinspect.InspectColors, |

761 | self.inspector = self.inspector_class( | |
|
762 | oinspect.InspectColors, | |
|
759 | 763 |             PyColorize.ANSICodeColors, |

760 | 764 |             self.colors, |

761 |             self.object_info_string_level) |

765 | self.object_info_string_level, | |
|
766 | ) | |
|
762 | 767 | |
|
763 | 768 | def init_io(self): |
|
764 | 769 | # implemented in subclasses, TerminalInteractiveShell does call |
|
765 | 770 | # colorama.init(). |
|
766 | 771 | pass |
|
767 | 772 | |
|
768 | 773 | def init_prompts(self): |
|
769 | 774 | # Set system prompts, so that scripts can decide if they are running |
|
770 | 775 | # interactively. |
|
771 | 776 | sys.ps1 = 'In : ' |
|
772 | 777 | sys.ps2 = '...: ' |
|
773 | 778 | sys.ps3 = 'Out: ' |
|
774 | 779 | |
|
775 | 780 | def init_display_formatter(self): |
|
776 | 781 | self.display_formatter = DisplayFormatter(parent=self) |
|
777 | 782 | self.configurables.append(self.display_formatter) |
|
778 | 783 | |
|
779 | 784 | def init_display_pub(self): |
|
780 | 785 | self.display_pub = self.display_pub_class(parent=self, shell=self) |
|
781 | 786 | self.configurables.append(self.display_pub) |
|
782 | 787 | |
|
783 | 788 | def init_data_pub(self): |
|
784 | 789 | if not self.data_pub_class: |
|
785 | 790 | self.data_pub = None |
|
786 | 791 | return |
|
787 | 792 | self.data_pub = self.data_pub_class(parent=self) |
|
788 | 793 | self.configurables.append(self.data_pub) |
|
789 | 794 | |
|
790 | 795 | def init_displayhook(self): |
|
791 | 796 | # Initialize displayhook, set in/out prompts and printing system |
|
792 | 797 | self.displayhook = self.displayhook_class( |
|
793 | 798 | parent=self, |
|
794 | 799 | shell=self, |
|
795 | 800 | cache_size=self.cache_size, |
|
796 | 801 | ) |
|
797 | 802 | self.configurables.append(self.displayhook) |
|
798 | 803 |         # This is a context manager that installs/removes the displayhook at |
|
799 | 804 | # the appropriate time. |
|
800 | 805 | self.display_trap = DisplayTrap(hook=self.displayhook) |
|
801 | 806 | |
|
802 | 807 | @staticmethod |
|
803 | 808 | def get_path_links(p: Path): |
|
804 | 809 | """Gets path links including all symlinks |
|
805 | 810 | |
|
806 | 811 | Examples |
|
807 | 812 | -------- |
|
808 | 813 | In [1]: from IPython.core.interactiveshell import InteractiveShell |
|
809 | 814 | |
|
810 | 815 | In [2]: import sys, pathlib |
|
811 | 816 | |
|
812 | 817 | In [3]: paths = InteractiveShell.get_path_links(pathlib.Path(sys.executable)) |
|
813 | 818 | |
|
814 | 819 | In [4]: len(paths) == len(set(paths)) |
|
815 | 820 | Out[4]: True |
|
816 | 821 | |
|
817 | 822 | In [5]: bool(paths) |
|
818 | 823 | Out[5]: True |
|
819 | 824 | """ |
|
820 | 825 | paths = [p] |
|
821 | 826 | while p.is_symlink(): |
|
822 | 827 | new_path = Path(os.readlink(p)) |
|
823 | 828 | if not new_path.is_absolute(): |
|
824 | 829 | new_path = p.parent / new_path |
|
825 | 830 | p = new_path |
|
826 | 831 | paths.append(p) |
|
827 | 832 | return paths |
|
828 | 833 | |
|
829 | 834 | def init_virtualenv(self): |
|
830 | 835 | """Add the current virtualenv to sys.path so the user can import modules from it. |
|
831 | 836 | This isn't perfect: it doesn't use the Python interpreter with which the |
|
832 | 837 | virtualenv was built, and it ignores the --no-site-packages option. A |
|
833 | 838 | warning will appear suggesting the user installs IPython in the |
|
834 | 839 | virtualenv, but for many cases, it probably works well enough. |
|
835 | 840 | |
|
836 | 841 | Adapted from code snippets online. |
|
837 | 842 | |
|
838 | 843 | http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv |
|
839 | 844 | """ |
|
840 | 845 | if 'VIRTUAL_ENV' not in os.environ: |
|
841 | 846 | # Not in a virtualenv |
|
842 | 847 | return |
|
843 | 848 | elif os.environ["VIRTUAL_ENV"] == "": |
|
844 | 849 | warn("Virtual env path set to '', please check if this is intended.") |
|
845 | 850 | return |
|
846 | 851 | |
|
847 | 852 | p = Path(sys.executable) |
|
848 | 853 | p_venv = Path(os.environ["VIRTUAL_ENV"]) |
|
849 | 854 | |
|
850 | 855 | # fallback venv detection: |
|
851 | 856 | # stdlib venv may symlink sys.executable, so we can't use realpath. |
|
852 | 857 | # but others can symlink *to* the venv Python, so we can't just use sys.executable. |
|
853 | 858 | # So we just check every item in the symlink tree (generally <= 3) |
|
854 | 859 | paths = self.get_path_links(p) |
|
855 | 860 | |
|
856 | 861 | # In Cygwin paths like "c:\..." and '\cygdrive\c\...' are possible |
|
857 | 862 | if p_venv.parts[1] == "cygdrive": |
|
858 | 863 | drive_name = p_venv.parts[2] |
|
859 | 864 | p_venv = (drive_name + ":/") / Path(*p_venv.parts[3:]) |
|
860 | 865 | |
|
861 | 866 | if any(p_venv == p.parents[1] for p in paths): |
|
862 | 867 | # Our exe is inside or has access to the virtualenv, don't need to do anything. |
|
863 | 868 | return |
|
864 | 869 | |
|
865 | 870 | if sys.platform == "win32": |
|
866 | 871 | virtual_env = str(Path(os.environ["VIRTUAL_ENV"], "Lib", "site-packages")) |
|
867 | 872 | else: |
|
868 | 873 | virtual_env_path = Path( |
|
869 | 874 | os.environ["VIRTUAL_ENV"], "lib", "python{}.{}", "site-packages" |
|
870 | 875 | ) |
|
871 | 876 | p_ver = sys.version_info[:2] |
|
872 | 877 | |
|
873 | 878 | # Predict version from py[thon]-x.x in the $VIRTUAL_ENV |
|
874 | 879 | re_m = re.search(r"\bpy(?:thon)?([23])\.(\d+)\b", os.environ["VIRTUAL_ENV"]) |
|
875 | 880 | if re_m: |
|
876 | 881 | predicted_path = Path(str(virtual_env_path).format(*re_m.groups())) |
|
877 | 882 | if predicted_path.exists(): |
|
878 | 883 | p_ver = re_m.groups() |
|
879 | 884 | |
|
880 | 885 | virtual_env = str(virtual_env_path).format(*p_ver) |
|
881 | 886 | if self.warn_venv: |
|
882 | 887 | warn( |
|
883 | 888 | "Attempting to work in a virtualenv. If you encounter problems, " |
|
884 | 889 | "please install IPython inside the virtualenv." |
|
885 | 890 | ) |
|
886 | 891 | import site |
|
887 | 892 | sys.path.insert(0, virtual_env) |
|
888 | 893 | site.addsitedir(virtual_env) |
|
889 | 894 | |
|
890 | 895 | #------------------------------------------------------------------------- |
|
891 | 896 | # Things related to injections into the sys module |
|
892 | 897 | #------------------------------------------------------------------------- |
|
893 | 898 | |
|
894 | 899 | def save_sys_module_state(self): |
|
895 | 900 | """Save the state of hooks in the sys module. |
|
896 | 901 | |
|
897 | 902 | This has to be called after self.user_module is created. |
|
898 | 903 | """ |
|
899 | 904 | self._orig_sys_module_state = {'stdin': sys.stdin, |
|
900 | 905 | 'stdout': sys.stdout, |
|
901 | 906 | 'stderr': sys.stderr, |
|
902 | 907 | 'excepthook': sys.excepthook} |
|
903 | 908 | self._orig_sys_modules_main_name = self.user_module.__name__ |
|
904 | 909 | self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__) |
|
905 | 910 | |
|
906 | 911 | def restore_sys_module_state(self): |
|
907 | 912 | """Restore the state of the sys module.""" |
|
908 | 913 | try: |
|
909 | 914 | for k, v in self._orig_sys_module_state.items(): |
|
910 | 915 | setattr(sys, k, v) |
|
911 | 916 | except AttributeError: |
|
912 | 917 | pass |
|
913 | 918 |         # Reset what was done in self.init_sys_modules |
|
914 | 919 | if self._orig_sys_modules_main_mod is not None: |
|
915 | 920 | sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod |
|
916 | 921 | |
|
917 | 922 | #------------------------------------------------------------------------- |
|
918 | 923 | # Things related to the banner |
|
919 | 924 | #------------------------------------------------------------------------- |
|
920 | 925 | |
|
921 | 926 | @property |
|
922 | 927 | def banner(self): |
|
923 | 928 | banner = self.banner1 |
|
924 | 929 | if self.profile and self.profile != 'default': |
|
925 | 930 | banner += '\nIPython profile: %s\n' % self.profile |
|
926 | 931 | if self.banner2: |
|
927 | 932 | banner += '\n' + self.banner2 |
|
928 | 933 | return banner |
|
929 | 934 | |
|
930 | 935 | def show_banner(self, banner=None): |
|
931 | 936 | if banner is None: |
|
932 | 937 | banner = self.banner |
|
933 | 938 | sys.stdout.write(banner) |
|
934 | 939 | |
|
935 | 940 | #------------------------------------------------------------------------- |
|
936 | 941 | # Things related to hooks |
|
937 | 942 | #------------------------------------------------------------------------- |
|
938 | 943 | |
|
939 | 944 | def init_hooks(self): |
|
940 | 945 | # hooks holds pointers used for user-side customizations |
|
941 | 946 | self.hooks = Struct() |
|
942 | 947 | |
|
943 | 948 | self.strdispatchers = {} |
|
944 | 949 | |
|
945 | 950 | # Set all default hooks, defined in the IPython.hooks module. |
|
946 | 951 | hooks = IPython.core.hooks |
|
947 | 952 | for hook_name in hooks.__all__: |
|
948 | 953 | # default hooks have priority 100, i.e. low; user hooks should have |
|
949 | 954 | # 0-100 priority |
|
950 | 955 | self.set_hook(hook_name, getattr(hooks, hook_name), 100) |
|
951 | 956 | |
|
952 | 957 | if self.display_page: |
|
953 | 958 | self.set_hook('show_in_pager', page.as_hook(page.display_page), 90) |
|
954 | 959 | |
|
955 | 960 | def set_hook(self, name, hook, priority=50, str_key=None, re_key=None): |
|
956 | 961 | """set_hook(name,hook) -> sets an internal IPython hook. |
|
957 | 962 | |
|
958 | 963 | IPython exposes some of its internal API as user-modifiable hooks. By |
|
959 | 964 | adding your function to one of these hooks, you can modify IPython's |
|
960 | 965 | behavior to call at runtime your own routines.""" |
|
961 | 966 | |
|
962 | 967 | # At some point in the future, this should validate the hook before it |
|
963 | 968 | # accepts it. Probably at least check that the hook takes the number |
|
964 | 969 | # of args it's supposed to. |
|
965 | 970 | |
|
966 | 971 | f = types.MethodType(hook,self) |
|
967 | 972 | |
|
968 | 973 | # check if the hook is for strdispatcher first |
|
969 | 974 | if str_key is not None: |
|
970 | 975 | sdp = self.strdispatchers.get(name, StrDispatch()) |
|
971 | 976 | sdp.add_s(str_key, f, priority ) |
|
972 | 977 | self.strdispatchers[name] = sdp |
|
973 | 978 | return |
|
974 | 979 | if re_key is not None: |
|
975 | 980 | sdp = self.strdispatchers.get(name, StrDispatch()) |
|
976 | 981 | sdp.add_re(re.compile(re_key), f, priority ) |
|
977 | 982 | self.strdispatchers[name] = sdp |
|
978 | 983 | return |
|
979 | 984 | |
|
980 | 985 | dp = getattr(self.hooks, name, None) |
|
981 | 986 | if name not in IPython.core.hooks.__all__: |
|
982 | 987 | print("Warning! Hook '%s' is not one of %s" % \ |
|
983 | 988 | (name, IPython.core.hooks.__all__ )) |
|
984 | 989 | |
|
985 | 990 | if name in IPython.core.hooks.deprecated: |
|
986 | 991 | alternative = IPython.core.hooks.deprecated[name] |
|
987 | 992 | raise ValueError( |
|
988 | 993 | "Hook {} has been deprecated since IPython 5.0. Use {} instead.".format( |
|
989 | 994 | name, alternative |
|
990 | 995 | ) |
|
991 | 996 | ) |
|
992 | 997 | |
|
993 | 998 | if not dp: |
|
994 | 999 | dp = IPython.core.hooks.CommandChainDispatcher() |
|
995 | 1000 | |
|
996 | 1001 | try: |
|
997 | 1002 | dp.add(f,priority) |
|
998 | 1003 | except AttributeError: |
|
999 | 1004 | # it was not commandchain, plain old func - replace |
|
1000 | 1005 | dp = f |
|
1001 | 1006 | |
|
1002 | 1007 | setattr(self.hooks,name, dp) |
|
1003 | 1008 | |
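For reference, a minimal sketch of registering a hook with the machinery above. It assumes an interactive session where ``get_ipython()`` returns the running shell, and it assumes the ``editor`` hook takes ``(self, filename, linenum=None, wait=True)`` as in IPython.core.hooks; the hook is bound with types.MethodType, so it receives the shell as its first argument::

    import subprocess

    def my_editor(self, filename, linenum=None, wait=True):
        # `self` is the InteractiveShell; the signature is an assumption here
        subprocess.call(["nano", "+%d" % (linenum or 1), filename])

    ip = get_ipython()
    ip.set_hook("editor", my_editor, priority=50)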
|
1004 | 1009 | #------------------------------------------------------------------------- |
|
1005 | 1010 | # Things related to events |
|
1006 | 1011 | #------------------------------------------------------------------------- |
|
1007 | 1012 | |
|
1008 | 1013 | def init_events(self): |
|
1009 | 1014 | self.events = EventManager(self, available_events) |
|
1010 | 1015 | |
|
1011 | 1016 | self.events.register("pre_execute", self._clear_warning_registry) |
|
1012 | 1017 | |
|
1013 | 1018 | def register_post_execute(self, func): |
|
1014 | 1019 | """DEPRECATED: Use ip.events.register('post_run_cell', func) |
|
1015 | 1020 | |
|
1016 | 1021 | Register a function for calling after code execution. |
|
1017 | 1022 | """ |
|
1018 | 1023 | raise ValueError( |
|
1019 | 1024 | "ip.register_post_execute is deprecated since IPython 1.0, use " |
|
1020 | 1025 | "ip.events.register('post_run_cell', func) instead." |
|
1021 | 1026 | ) |
|
1022 | 1027 | |
|
1023 | 1028 | def _clear_warning_registry(self): |
|
1024 | 1029 | # clear the warning registry, so that different code blocks with |
|
1025 | 1030 | # overlapping line number ranges don't cause spurious suppression of |
|
1026 | 1031 | # warnings (see gh-6611 for details) |
|
1027 | 1032 | if "__warningregistry__" in self.user_global_ns: |
|
1028 | 1033 | del self.user_global_ns["__warningregistry__"] |
|
1029 | 1034 | |
|
1030 | 1035 | #------------------------------------------------------------------------- |
|
1031 | 1036 | # Things related to the "main" module |
|
1032 | 1037 | #------------------------------------------------------------------------- |
|
1033 | 1038 | |
|
1034 | 1039 | def new_main_mod(self, filename, modname): |
|
1035 | 1040 | """Return a new 'main' module object for user code execution. |
|
1036 | 1041 | |
|
1037 | 1042 | ``filename`` should be the path of the script which will be run in the |
|
1038 | 1043 | module. Requests with the same filename will get the same module, with |
|
1039 | 1044 | its namespace cleared. |
|
1040 | 1045 | |
|
1041 | 1046 | ``modname`` should be the module name - normally either '__main__' or |
|
1042 | 1047 | the basename of the file without the extension. |
|
1043 | 1048 | |
|
1044 | 1049 | When scripts are executed via %run, we must keep a reference to their |
|
1045 | 1050 | __main__ module around so that Python doesn't |
|
1046 | 1051 | clear it, rendering references to module globals useless. |
|
1047 | 1052 | |
|
1048 | 1053 | This method keeps said reference in a private dict, keyed by the |
|
1049 | 1054 | absolute path of the script. This way, for multiple executions of the |
|
1050 | 1055 | same script we only keep one copy of the namespace (the last one), |
|
1051 | 1056 | thus preventing memory leaks from old references while allowing the |
|
1052 | 1057 | objects from the last execution to be accessible. |
|
1053 | 1058 | """ |
|
1054 | 1059 | filename = os.path.abspath(filename) |
|
1055 | 1060 | try: |
|
1056 | 1061 | main_mod = self._main_mod_cache[filename] |
|
1057 | 1062 | except KeyError: |
|
1058 | 1063 | main_mod = self._main_mod_cache[filename] = types.ModuleType( |
|
1059 | 1064 | modname, |
|
1060 | 1065 | doc="Module created for script run in IPython") |
|
1061 | 1066 | else: |
|
1062 | 1067 | main_mod.__dict__.clear() |
|
1063 | 1068 | main_mod.__name__ = modname |
|
1064 | 1069 | |
|
1065 | 1070 | main_mod.__file__ = filename |
|
1066 | 1071 | # It seems pydoc (and perhaps others) needs any module instance to |
|
1067 | 1072 | # implement a __nonzero__ method |
|
1068 | 1073 | main_mod.__nonzero__ = lambda : True |
|
1069 | 1074 | |
|
1070 | 1075 | return main_mod |
|
1071 | 1076 | |
|
1072 | 1077 | def clear_main_mod_cache(self): |
|
1073 | 1078 | """Clear the cache of main modules. |
|
1074 | 1079 | |
|
1075 | 1080 | Mainly for use by utilities like %reset. |
|
1076 | 1081 | |
|
1077 | 1082 | Examples |
|
1078 | 1083 | -------- |
|
1079 | 1084 | In [15]: import IPython |
|
1080 | 1085 | |
|
1081 | 1086 | In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython') |
|
1082 | 1087 | |
|
1083 | 1088 | In [17]: len(_ip._main_mod_cache) > 0 |
|
1084 | 1089 | Out[17]: True |
|
1085 | 1090 | |
|
1086 | 1091 | In [18]: _ip.clear_main_mod_cache() |
|
1087 | 1092 | |
|
1088 | 1093 | In [19]: len(_ip._main_mod_cache) == 0 |
|
1089 | 1094 | Out[19]: True |
|
1090 | 1095 | """ |
|
1091 | 1096 | self._main_mod_cache.clear() |
|
1092 | 1097 | |
|
1093 | 1098 | #------------------------------------------------------------------------- |
|
1094 | 1099 | # Things related to debugging |
|
1095 | 1100 | #------------------------------------------------------------------------- |
|
1096 | 1101 | |
|
1097 | 1102 | def init_pdb(self): |
|
1098 | 1103 | # Set calling of pdb on exceptions |
|
1099 | 1104 | # self.call_pdb is a property |
|
1100 | 1105 | self.call_pdb = self.pdb |
|
1101 | 1106 | |
|
1102 | 1107 | def _get_call_pdb(self): |
|
1103 | 1108 | return self._call_pdb |
|
1104 | 1109 | |
|
1105 | 1110 | def _set_call_pdb(self,val): |
|
1106 | 1111 | |
|
1107 | 1112 | if val not in (0,1,False,True): |
|
1108 | 1113 | raise ValueError('new call_pdb value must be boolean') |
|
1109 | 1114 | |
|
1110 | 1115 | # store value in instance |
|
1111 | 1116 | self._call_pdb = val |
|
1112 | 1117 | |
|
1113 | 1118 | # notify the actual exception handlers |
|
1114 | 1119 | self.InteractiveTB.call_pdb = val |
|
1115 | 1120 | |
|
1116 | 1121 | call_pdb = property(_get_call_pdb,_set_call_pdb,None, |
|
1117 | 1122 | 'Control auto-activation of pdb at exceptions') |
|
1118 | 1123 | |
|
1119 | 1124 | def debugger(self,force=False): |
|
1120 | 1125 | """Call the pdb debugger. |
|
1121 | 1126 | |
|
1122 | 1127 | Keywords: |
|
1123 | 1128 | |
|
1124 | 1129 | - force(False): by default, this routine checks the instance call_pdb |
|
1125 | 1130 | flag and does not actually invoke the debugger if the flag is false. |
|
1126 | 1131 | The 'force' option forces the debugger to activate even if the flag |
|
1127 | 1132 | is false. |
|
1128 | 1133 | """ |
|
1129 | 1134 | |
|
1130 | 1135 | if not (force or self.call_pdb): |
|
1131 | 1136 | return |
|
1132 | 1137 | |
|
1133 | 1138 | if not hasattr(sys,'last_traceback'): |
|
1134 | 1139 | error('No traceback has been produced, nothing to debug.') |
|
1135 | 1140 | return |
|
1136 | 1141 | |
|
1137 | 1142 | self.InteractiveTB.debugger(force=True) |
|
1138 | 1143 | |
|
1139 | 1144 | #------------------------------------------------------------------------- |
|
1140 | 1145 | # Things related to IPython's various namespaces |
|
1141 | 1146 | #------------------------------------------------------------------------- |
|
1142 | 1147 | default_user_namespaces = True |
|
1143 | 1148 | |
|
1144 | 1149 | def init_create_namespaces(self, user_module=None, user_ns=None): |
|
1145 | 1150 | # Create the namespace where the user will operate. user_ns is |
|
1146 | 1151 | # normally the only one used, and it is passed to the exec calls as |
|
1147 | 1152 | # the locals argument. But we do carry a user_global_ns namespace |
|
1148 | 1153 |         # given as the exec 'globals' argument. This is useful in embedding
|
1149 | 1154 | # situations where the ipython shell opens in a context where the |
|
1150 | 1155 | # distinction between locals and globals is meaningful. For |
|
1151 | 1156 | # non-embedded contexts, it is just the same object as the user_ns dict. |
|
1152 | 1157 | |
|
1153 | 1158 | # FIXME. For some strange reason, __builtins__ is showing up at user |
|
1154 | 1159 | # level as a dict instead of a module. This is a manual fix, but I |
|
1155 | 1160 | # should really track down where the problem is coming from. Alex |
|
1156 | 1161 | # Schmolck reported this problem first. |
|
1157 | 1162 | |
|
1158 | 1163 | # A useful post by Alex Martelli on this topic: |
|
1159 | 1164 | # Re: inconsistent value from __builtins__ |
|
1160 | 1165 | # Von: Alex Martelli <aleaxit@yahoo.com> |
|
1161 | 1166 | # Datum: Freitag 01 Oktober 2004 04:45:34 nachmittags/abends |
|
1162 | 1167 | # Gruppen: comp.lang.python |
|
1163 | 1168 | |
|
1164 | 1169 | # Michael Hohn <hohn@hooknose.lbl.gov> wrote: |
|
1165 | 1170 | # > >>> print type(builtin_check.get_global_binding('__builtins__')) |
|
1166 | 1171 | # > <type 'dict'> |
|
1167 | 1172 | # > >>> print type(__builtins__) |
|
1168 | 1173 | # > <type 'module'> |
|
1169 | 1174 | # > Is this difference in return value intentional? |
|
1170 | 1175 | |
|
1171 | 1176 | # Well, it's documented that '__builtins__' can be either a dictionary |
|
1172 | 1177 | # or a module, and it's been that way for a long time. Whether it's |
|
1173 | 1178 | # intentional (or sensible), I don't know. In any case, the idea is |
|
1174 | 1179 | # that if you need to access the built-in namespace directly, you |
|
1175 | 1180 | # should start with "import __builtin__" (note, no 's') which will |
|
1176 | 1181 | # definitely give you a module. Yeah, it's somewhat confusing:-(. |
|
1177 | 1182 | |
|
1178 | 1183 | # These routines return a properly built module and dict as needed by |
|
1179 | 1184 | # the rest of the code, and can also be used by extension writers to |
|
1180 | 1185 | # generate properly initialized namespaces. |
|
1181 | 1186 | if (user_ns is not None) or (user_module is not None): |
|
1182 | 1187 | self.default_user_namespaces = False |
|
1183 | 1188 | self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns) |
|
1184 | 1189 | |
|
1185 | 1190 | # A record of hidden variables we have added to the user namespace, so |
|
1186 | 1191 | # we can list later only variables defined in actual interactive use. |
|
1187 | 1192 | self.user_ns_hidden = {} |
|
1188 | 1193 | |
|
1189 | 1194 | # Now that FakeModule produces a real module, we've run into a nasty |
|
1190 | 1195 | # problem: after script execution (via %run), the module where the user |
|
1191 | 1196 | # code ran is deleted. Now that this object is a true module (needed |
|
1192 | 1197 | # so doctest and other tools work correctly), the Python module |
|
1193 | 1198 | # teardown mechanism runs over it, and sets to None every variable |
|
1194 | 1199 | # present in that module. Top-level references to objects from the |
|
1195 | 1200 | # script survive, because the user_ns is updated with them. However, |
|
1196 | 1201 | # calling functions defined in the script that use other things from |
|
1197 | 1202 | # the script will fail, because the function's closure had references |
|
1198 | 1203 | # to the original objects, which are now all None. So we must protect |
|
1199 | 1204 | # these modules from deletion by keeping a cache. |
|
1200 | 1205 | # |
|
1201 | 1206 | # To avoid keeping stale modules around (we only need the one from the |
|
1202 | 1207 | # last run), we use a dict keyed with the full path to the script, so |
|
1203 | 1208 | # only the last version of the module is held in the cache. Note, |
|
1204 | 1209 | # however, that we must cache the module *namespace contents* (their |
|
1205 | 1210 | # __dict__). Because if we try to cache the actual modules, old ones |
|
1206 | 1211 | # (uncached) could be destroyed while still holding references (such as |
|
1207 | 1212 |         # those held by GUI objects that tend to be long-lived).
|
1208 | 1213 | # |
|
1209 | 1214 | # The %reset command will flush this cache. See the cache_main_mod() |
|
1210 | 1215 | # and clear_main_mod_cache() methods for details on use. |
|
1211 | 1216 | |
|
1212 | 1217 | # This is the cache used for 'main' namespaces |
|
1213 | 1218 | self._main_mod_cache = {} |
|
1214 | 1219 | |
|
1215 | 1220 | # A table holding all the namespaces IPython deals with, so that |
|
1216 | 1221 | # introspection facilities can search easily. |
|
1217 | 1222 | self.ns_table = {'user_global':self.user_module.__dict__, |
|
1218 | 1223 | 'user_local':self.user_ns, |
|
1219 | 1224 | 'builtin':builtin_mod.__dict__ |
|
1220 | 1225 | } |
|
1221 | 1226 | |
|
1222 | 1227 | @property |
|
1223 | 1228 | def user_global_ns(self): |
|
1224 | 1229 | return self.user_module.__dict__ |
|
1225 | 1230 | |
|
1226 | 1231 | def prepare_user_module(self, user_module=None, user_ns=None): |
|
1227 | 1232 | """Prepare the module and namespace in which user code will be run. |
|
1228 | 1233 | |
|
1229 | 1234 | When IPython is started normally, both parameters are None: a new module |
|
1230 | 1235 | is created automatically, and its __dict__ used as the namespace. |
|
1231 | 1236 | |
|
1232 | 1237 | If only user_module is provided, its __dict__ is used as the namespace. |
|
1233 | 1238 | If only user_ns is provided, a dummy module is created, and user_ns |
|
1234 | 1239 | becomes the global namespace. If both are provided (as they may be |
|
1235 | 1240 | when embedding), user_ns is the local namespace, and user_module |
|
1236 | 1241 | provides the global namespace. |
|
1237 | 1242 | |
|
1238 | 1243 | Parameters |
|
1239 | 1244 | ---------- |
|
1240 | 1245 | user_module : module, optional |
|
1241 | 1246 | The current user module in which IPython is being run. If None, |
|
1242 | 1247 | a clean module will be created. |
|
1243 | 1248 | user_ns : dict, optional |
|
1244 | 1249 | A namespace in which to run interactive commands. |
|
1245 | 1250 | |
|
1246 | 1251 | Returns |
|
1247 | 1252 | ------- |
|
1248 | 1253 | A tuple of user_module and user_ns, each properly initialised. |
|
1249 | 1254 | """ |
|
1250 | 1255 | if user_module is None and user_ns is not None: |
|
1251 | 1256 | user_ns.setdefault("__name__", "__main__") |
|
1252 | 1257 | user_module = DummyMod() |
|
1253 | 1258 | user_module.__dict__ = user_ns |
|
1254 | 1259 | |
|
1255 | 1260 | if user_module is None: |
|
1256 | 1261 | user_module = types.ModuleType("__main__", |
|
1257 | 1262 | doc="Automatically created module for IPython interactive environment") |
|
1258 | 1263 | |
|
1259 | 1264 | # We must ensure that __builtin__ (without the final 's') is always |
|
1260 | 1265 | # available and pointing to the __builtin__ *module*. For more details: |
|
1261 | 1266 | # http://mail.python.org/pipermail/python-dev/2001-April/014068.html |
|
1262 | 1267 | user_module.__dict__.setdefault('__builtin__', builtin_mod) |
|
1263 | 1268 | user_module.__dict__.setdefault('__builtins__', builtin_mod) |
|
1264 | 1269 | |
|
1265 | 1270 | if user_ns is None: |
|
1266 | 1271 | user_ns = user_module.__dict__ |
|
1267 | 1272 | |
|
1268 | 1273 | return user_module, user_ns |
|
1269 | 1274 | |
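A minimal sketch of the "only user_ns provided" case described in the docstring, assuming ``ip`` is an InteractiveShell instance (e.g. from ``get_ipython()``)::

    ns = {"answer": 42}
    mod, user_ns = ip.prepare_user_module(user_ns=ns)
    assert user_ns is ns                       # the dict is reused, not copied
    assert "__builtin__" in user_ns            # builtin module injected
    assert user_ns["__name__"] == "__main__"   # set by setdefault above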
|
1270 | 1275 | def init_sys_modules(self): |
|
1271 | 1276 | # We need to insert into sys.modules something that looks like a |
|
1272 | 1277 | # module but which accesses the IPython namespace, for shelve and |
|
1273 | 1278 | # pickle to work interactively. Normally they rely on getting |
|
1274 | 1279 | # everything out of __main__, but for embedding purposes each IPython |
|
1275 | 1280 | # instance has its own private namespace, so we can't go shoving |
|
1276 | 1281 | # everything into __main__. |
|
1277 | 1282 | |
|
1278 | 1283 | # note, however, that we should only do this for non-embedded |
|
1279 | 1284 | # ipythons, which really mimic the __main__.__dict__ with their own |
|
1280 | 1285 | # namespace. Embedded instances, on the other hand, should not do |
|
1281 | 1286 | # this because they need to manage the user local/global namespaces |
|
1282 | 1287 | # only, but they live within a 'normal' __main__ (meaning, they |
|
1283 | 1288 | # shouldn't overtake the execution environment of the script they're |
|
1284 | 1289 | # embedded in). |
|
1285 | 1290 | |
|
1286 | 1291 | # This is overridden in the InteractiveShellEmbed subclass to a no-op. |
|
1287 | 1292 | main_name = self.user_module.__name__ |
|
1288 | 1293 | sys.modules[main_name] = self.user_module |
|
1289 | 1294 | |
|
1290 | 1295 | def init_user_ns(self): |
|
1291 | 1296 | """Initialize all user-visible namespaces to their minimum defaults. |
|
1292 | 1297 | |
|
1293 | 1298 | Certain history lists are also initialized here, as they effectively |
|
1294 | 1299 | act as user namespaces. |
|
1295 | 1300 | |
|
1296 | 1301 | Notes |
|
1297 | 1302 | ----- |
|
1298 | 1303 | All data structures here are only filled in, they are NOT reset by this |
|
1299 | 1304 | method. If they were not empty before, data will simply be added to |
|
1300 | 1305 | them. |
|
1301 | 1306 | """ |
|
1302 | 1307 | # This function works in two parts: first we put a few things in |
|
1303 | 1308 |         # user_ns, and we sync those contents into user_ns_hidden so that these
|
1304 | 1309 | # initial variables aren't shown by %who. After the sync, we add the |
|
1305 | 1310 | # rest of what we *do* want the user to see with %who even on a new |
|
1306 | 1311 | # session (probably nothing, so they really only see their own stuff) |
|
1307 | 1312 | |
|
1308 | 1313 | # The user dict must *always* have a __builtin__ reference to the |
|
1309 | 1314 | # Python standard __builtin__ namespace, which must be imported. |
|
1310 | 1315 | # This is so that certain operations in prompt evaluation can be |
|
1311 | 1316 | # reliably executed with builtins. Note that we can NOT use |
|
1312 | 1317 | # __builtins__ (note the 's'), because that can either be a dict or a |
|
1313 | 1318 | # module, and can even mutate at runtime, depending on the context |
|
1314 | 1319 | # (Python makes no guarantees on it). In contrast, __builtin__ is |
|
1315 | 1320 | # always a module object, though it must be explicitly imported. |
|
1316 | 1321 | |
|
1317 | 1322 | # For more details: |
|
1318 | 1323 | # http://mail.python.org/pipermail/python-dev/2001-April/014068.html |
|
1319 | 1324 | ns = {} |
|
1320 | 1325 | |
|
1321 | 1326 | # make global variables for user access to the histories |
|
1322 | 1327 | ns['_ih'] = self.history_manager.input_hist_parsed |
|
1323 | 1328 | ns['_oh'] = self.history_manager.output_hist |
|
1324 | 1329 | ns['_dh'] = self.history_manager.dir_hist |
|
1325 | 1330 | |
|
1326 | 1331 | # user aliases to input and output histories. These shouldn't show up |
|
1327 | 1332 | # in %who, as they can have very large reprs. |
|
1328 | 1333 | ns['In'] = self.history_manager.input_hist_parsed |
|
1329 | 1334 | ns['Out'] = self.history_manager.output_hist |
|
1330 | 1335 | |
|
1331 | 1336 | # Store myself as the public api!!! |
|
1332 | 1337 | ns['get_ipython'] = self.get_ipython |
|
1333 | 1338 | |
|
1334 | 1339 | ns['exit'] = self.exiter |
|
1335 | 1340 | ns['quit'] = self.exiter |
|
1336 | 1341 | ns["open"] = _modified_open |
|
1337 | 1342 | |
|
1338 | 1343 | # Sync what we've added so far to user_ns_hidden so these aren't seen |
|
1339 | 1344 | # by %who |
|
1340 | 1345 | self.user_ns_hidden.update(ns) |
|
1341 | 1346 | |
|
1342 | 1347 | # Anything put into ns now would show up in %who. Think twice before |
|
1343 | 1348 | # putting anything here, as we really want %who to show the user their |
|
1344 | 1349 | # stuff, not our variables. |
|
1345 | 1350 | |
|
1346 | 1351 | # Finally, update the real user's namespace |
|
1347 | 1352 | self.user_ns.update(ns) |
|
1348 | 1353 | |
|
1349 | 1354 | @property |
|
1350 | 1355 | def all_ns_refs(self): |
|
1351 | 1356 | """Get a list of references to all the namespace dictionaries in which |
|
1352 | 1357 | IPython might store a user-created object. |
|
1353 | 1358 | |
|
1354 | 1359 | Note that this does not include the displayhook, which also caches |
|
1355 | 1360 | objects from the output.""" |
|
1356 | 1361 | return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \ |
|
1357 | 1362 | [m.__dict__ for m in self._main_mod_cache.values()] |
|
1358 | 1363 | |
|
1359 | 1364 | def reset(self, new_session=True, aggressive=False): |
|
1360 | 1365 | """Clear all internal namespaces, and attempt to release references to |
|
1361 | 1366 | user objects. |
|
1362 | 1367 | |
|
1363 | 1368 | If new_session is True, a new history session will be opened. |
|
1364 | 1369 | """ |
|
1365 | 1370 | # Clear histories |
|
1366 | 1371 | self.history_manager.reset(new_session) |
|
1367 | 1372 | # Reset counter used to index all histories |
|
1368 | 1373 | if new_session: |
|
1369 | 1374 | self.execution_count = 1 |
|
1370 | 1375 | |
|
1371 | 1376 | # Reset last execution result |
|
1372 | 1377 | self.last_execution_succeeded = True |
|
1373 | 1378 | self.last_execution_result = None |
|
1374 | 1379 | |
|
1375 | 1380 | # Flush cached output items |
|
1376 | 1381 | if self.displayhook.do_full_cache: |
|
1377 | 1382 | self.displayhook.flush() |
|
1378 | 1383 | |
|
1379 | 1384 | # The main execution namespaces must be cleared very carefully, |
|
1380 | 1385 | # skipping the deletion of the builtin-related keys, because doing so |
|
1381 | 1386 |         # would cause errors in many objects' __del__ methods.
|
1382 | 1387 | if self.user_ns is not self.user_global_ns: |
|
1383 | 1388 | self.user_ns.clear() |
|
1384 | 1389 | ns = self.user_global_ns |
|
1385 | 1390 | drop_keys = set(ns.keys()) |
|
1386 | 1391 | drop_keys.discard('__builtin__') |
|
1387 | 1392 | drop_keys.discard('__builtins__') |
|
1388 | 1393 | drop_keys.discard('__name__') |
|
1389 | 1394 | for k in drop_keys: |
|
1390 | 1395 | del ns[k] |
|
1391 | 1396 | |
|
1392 | 1397 | self.user_ns_hidden.clear() |
|
1393 | 1398 | |
|
1394 | 1399 | # Restore the user namespaces to minimal usability |
|
1395 | 1400 | self.init_user_ns() |
|
1396 | 1401 | if aggressive and not hasattr(self, "_sys_modules_keys"): |
|
1397 | 1402 |             print("Cannot restore sys.modules, no snapshot")
|
1398 | 1403 | elif aggressive: |
|
1399 | 1404 | print("culling sys module...") |
|
1400 | 1405 | current_keys = set(sys.modules.keys()) |
|
1401 | 1406 | for k in current_keys - self._sys_modules_keys: |
|
1402 | 1407 | if k.startswith("multiprocessing"): |
|
1403 | 1408 | continue |
|
1404 | 1409 | del sys.modules[k] |
|
1405 | 1410 | |
|
1406 | 1411 | # Restore the default and user aliases |
|
1407 | 1412 | self.alias_manager.clear_aliases() |
|
1408 | 1413 | self.alias_manager.init_aliases() |
|
1409 | 1414 | |
|
1410 | 1415 | # Now define aliases that only make sense on the terminal, because they |
|
1411 | 1416 | # need direct access to the console in a way that we can't emulate in |
|
1412 | 1417 | # GUI or web frontend |
|
1413 | 1418 | if os.name == 'posix': |
|
1414 | 1419 | for cmd in ('clear', 'more', 'less', 'man'): |
|
1415 | 1420 | if cmd not in self.magics_manager.magics['line']: |
|
1416 | 1421 | self.alias_manager.soft_define_alias(cmd, cmd) |
|
1417 | 1422 | |
|
1418 | 1423 | # Flush the private list of module references kept for script |
|
1419 | 1424 | # execution protection |
|
1420 | 1425 | self.clear_main_mod_cache() |
|
1421 | 1426 | |
|
1422 | 1427 | def del_var(self, varname, by_name=False): |
|
1423 | 1428 | """Delete a variable from the various namespaces, so that, as |
|
1424 | 1429 | far as possible, we're not keeping any hidden references to it. |
|
1425 | 1430 | |
|
1426 | 1431 | Parameters |
|
1427 | 1432 | ---------- |
|
1428 | 1433 | varname : str |
|
1429 | 1434 | The name of the variable to delete. |
|
1430 | 1435 | by_name : bool |
|
1431 | 1436 | If True, delete variables with the given name in each |
|
1432 | 1437 | namespace. If False (default), find the variable in the user |
|
1433 | 1438 | namespace, and delete references to it. |
|
1434 | 1439 | """ |
|
1435 | 1440 | if varname in ('__builtin__', '__builtins__'): |
|
1436 | 1441 | raise ValueError("Refusing to delete %s" % varname) |
|
1437 | 1442 | |
|
1438 | 1443 | ns_refs = self.all_ns_refs |
|
1439 | 1444 | |
|
1440 | 1445 | if by_name: # Delete by name |
|
1441 | 1446 | for ns in ns_refs: |
|
1442 | 1447 | try: |
|
1443 | 1448 | del ns[varname] |
|
1444 | 1449 | except KeyError: |
|
1445 | 1450 | pass |
|
1446 | 1451 | else: # Delete by object |
|
1447 | 1452 | try: |
|
1448 | 1453 | obj = self.user_ns[varname] |
|
1449 | 1454 | except KeyError as e: |
|
1450 | 1455 | raise NameError("name '%s' is not defined" % varname) from e |
|
1451 | 1456 | # Also check in output history |
|
1452 | 1457 | ns_refs.append(self.history_manager.output_hist) |
|
1453 | 1458 | for ns in ns_refs: |
|
1454 | 1459 | to_delete = [n for n, o in ns.items() if o is obj] |
|
1455 | 1460 | for name in to_delete: |
|
1456 | 1461 | del ns[name] |
|
1457 | 1462 | |
|
1458 | 1463 | # Ensure it is removed from the last execution result |
|
1459 | 1464 |             if self.last_execution_result and self.last_execution_result.result is obj:
|
1460 | 1465 | self.last_execution_result = None |
|
1461 | 1466 | |
|
1462 | 1467 | # displayhook keeps extra references, but not in a dictionary |
|
1463 | 1468 | for name in ('_', '__', '___'): |
|
1464 | 1469 | if getattr(self.displayhook, name) is obj: |
|
1465 | 1470 | setattr(self.displayhook, name, None) |
|
1466 | 1471 | |
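A hedged sketch of the two deletion modes, assuming ``ip`` is a live shell in which at least one cell has already run (so ``last_execution_result`` is populated)::

    sentinel = object()
    ip.user_ns["a"] = ip.user_ns["b"] = sentinel
    ip.del_var("a")                    # by object: removes both 'a' and 'b'
    ip.del_var("ghost", by_name=True)  # by name: missing names are ignored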
|
1467 | 1472 | def reset_selective(self, regex=None): |
|
1468 | 1473 | """Clear selective variables from internal namespaces based on a |
|
1469 | 1474 | specified regular expression. |
|
1470 | 1475 | |
|
1471 | 1476 | Parameters |
|
1472 | 1477 | ---------- |
|
1473 | 1478 | regex : string or compiled pattern, optional |
|
1474 | 1479 | A regular expression pattern that will be used in searching |
|
1475 | 1480 |             variable names in the user's namespaces.
|
1476 | 1481 | """ |
|
1477 | 1482 | if regex is not None: |
|
1478 | 1483 | try: |
|
1479 | 1484 | m = re.compile(regex) |
|
1480 | 1485 | except TypeError as e: |
|
1481 | 1486 | raise TypeError('regex must be a string or compiled pattern') from e |
|
1482 | 1487 | # Search for keys in each namespace that match the given regex |
|
1483 | 1488 | # If a match is found, delete the key/value pair. |
|
1484 | 1489 | for ns in self.all_ns_refs: |
|
1485 | 1490 |                 for var in list(ns):  # iterate over a copy, since we delete from ns
|
1486 | 1491 | if m.search(var): |
|
1487 | 1492 | del ns[var] |
|
1488 | 1493 | |
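A short sketch, assuming ``ip`` as above; the pattern is applied with ``re.search``, so anchor it when a prefix match is intended::

    ip.user_ns.update(tmp_a=1, tmp_b=2, keep=3)
    ip.reset_selective(r"^tmp")
    assert "keep" in ip.user_ns
    assert "tmp_a" not in ip.user_ns and "tmp_b" not in ip.user_ns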
|
1489 | 1494 | def push(self, variables, interactive=True): |
|
1490 | 1495 | """Inject a group of variables into the IPython user namespace. |
|
1491 | 1496 | |
|
1492 | 1497 | Parameters |
|
1493 | 1498 | ---------- |
|
1494 | 1499 | variables : dict, str or list/tuple of str |
|
1495 | 1500 | The variables to inject into the user's namespace. If a dict, a |
|
1496 | 1501 | simple update is done. If a str, the string is assumed to have |
|
1497 | 1502 | variable names separated by spaces. A list/tuple of str can also |
|
1498 | 1503 | be used to give the variable names. If just the variable names are |
|
1499 | 1504 |             given (list/tuple/str), the variable values are looked up in the

1500 | 1505 |             caller's frame.
|
1501 | 1506 | interactive : bool |
|
1502 | 1507 | If True (default), the variables will be listed with the ``who`` |
|
1503 | 1508 | magic. |
|
1504 | 1509 | """ |
|
1505 | 1510 | vdict = None |
|
1506 | 1511 | |
|
1507 | 1512 | # We need a dict of name/value pairs to do namespace updates. |
|
1508 | 1513 | if isinstance(variables, dict): |
|
1509 | 1514 | vdict = variables |
|
1510 | 1515 | elif isinstance(variables, (str, list, tuple)): |
|
1511 | 1516 | if isinstance(variables, str): |
|
1512 | 1517 | vlist = variables.split() |
|
1513 | 1518 | else: |
|
1514 | 1519 | vlist = variables |
|
1515 | 1520 | vdict = {} |
|
1516 | 1521 | cf = sys._getframe(1) |
|
1517 | 1522 | for name in vlist: |
|
1518 | 1523 | try: |
|
1519 | 1524 | vdict[name] = eval(name, cf.f_globals, cf.f_locals) |
|
1520 | 1525 | except: |
|
1521 | 1526 | print('Could not get variable %s from %s' % |
|
1522 | 1527 | (name,cf.f_code.co_name)) |
|
1523 | 1528 | else: |
|
1524 | 1529 | raise ValueError('variables must be a dict/str/list/tuple') |
|
1525 | 1530 | |
|
1526 | 1531 | # Propagate variables to user namespace |
|
1527 | 1532 | self.user_ns.update(vdict) |
|
1528 | 1533 | |
|
1529 | 1534 | # And configure interactive visibility |
|
1530 | 1535 | user_ns_hidden = self.user_ns_hidden |
|
1531 | 1536 | if interactive: |
|
1532 | 1537 | for name in vdict: |
|
1533 | 1538 | user_ns_hidden.pop(name, None) |
|
1534 | 1539 | else: |
|
1535 | 1540 | user_ns_hidden.update(vdict) |
|
1536 | 1541 | |
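A usage sketch (assuming ``ip``): names passed as a string are resolved in the caller's frame, and ``interactive=False`` keeps the new names out of ``%who``::

    a, b = 1, 2
    ip.push("a b")                        # values taken from this frame
    ip.push({"c": 3}, interactive=False)  # injected, but hidden from %who
    assert ip.user_ns["a"] == 1 and ip.user_ns["c"] == 3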
|
1537 | 1542 | def drop_by_id(self, variables): |
|
1538 | 1543 | """Remove a dict of variables from the user namespace, if they are the |
|
1539 | 1544 | same as the values in the dictionary. |
|
1540 | 1545 | |
|
1541 | 1546 | This is intended for use by extensions: variables that they've added can |
|
1542 | 1547 | be taken back out if they are unloaded, without removing any that the |
|
1543 | 1548 | user has overwritten. |
|
1544 | 1549 | |
|
1545 | 1550 | Parameters |
|
1546 | 1551 | ---------- |
|
1547 | 1552 | variables : dict |
|
1548 | 1553 | A dictionary mapping object names (as strings) to the objects. |
|
1549 | 1554 | """ |
|
1550 | 1555 | for name, obj in variables.items(): |
|
1551 | 1556 | if name in self.user_ns and self.user_ns[name] is obj: |
|
1552 | 1557 | del self.user_ns[name] |
|
1553 | 1558 | self.user_ns_hidden.pop(name, None) |
|
1554 | 1559 | |
|
1555 | 1560 | #------------------------------------------------------------------------- |
|
1556 | 1561 | # Things related to object introspection |
|
1557 | 1562 | #------------------------------------------------------------------------- |
|
1558 | 1563 | |
|
1559 | 1564 | def _ofind(self, oname, namespaces=None): |
|
1560 | 1565 | """Find an object in the available namespaces. |
|
1561 | 1566 | |
|
1562 | 1567 | self._ofind(oname) -> dict with keys: found,obj,ospace,ismagic |
|
1563 | 1568 | |
|
1564 | 1569 | Has special code to detect magic functions. |
|
1565 | 1570 | """ |
|
1566 | 1571 | oname = oname.strip() |
|
1567 | 1572 | raw_parts = oname.split(".") |
|
1568 | 1573 | parts = [] |
|
1569 | 1574 | parts_ok = True |
|
1570 | 1575 | for p in raw_parts: |
|
1571 | 1576 | if p.endswith("]"): |
|
1572 | 1577 | var, *indices = p.split("[") |
|
1573 | 1578 | if not var.isidentifier(): |
|
1574 | 1579 | parts_ok = False |
|
1575 | 1580 | break |
|
1576 | 1581 | parts.append(var) |
|
1577 | 1582 | for ind in indices: |
|
1578 | 1583 | if ind[-1] != "]" and not is_integer_string(ind[:-1]): |
|
1579 | 1584 | parts_ok = False |
|
1580 | 1585 | break |
|
1581 | 1586 | parts.append(ind[:-1]) |
|
1582 | 1587 | continue |
|
1583 | 1588 | |
|
1584 | 1589 | if not p.isidentifier(): |
|
1585 | 1590 | parts_ok = False |
|
1586 | 1591 | parts.append(p) |
|
1587 | 1592 | |
|
1588 | 1593 | if ( |
|
1589 | 1594 | not oname.startswith(ESC_MAGIC) |
|
1590 | 1595 | and not oname.startswith(ESC_MAGIC2) |
|
1591 | 1596 | and not parts_ok |
|
1592 | 1597 | ): |
|
1593 | 1598 | return {"found": False} |
|
1594 | 1599 | |
|
1595 | 1600 | if namespaces is None: |
|
1596 | 1601 | # Namespaces to search in: |
|
1597 | 1602 | # Put them in a list. The order is important so that we |
|
1598 | 1603 | # find things in the same order that Python finds them. |
|
1599 | 1604 | namespaces = [ ('Interactive', self.user_ns), |
|
1600 | 1605 | ('Interactive (global)', self.user_global_ns), |
|
1601 | 1606 | ('Python builtin', builtin_mod.__dict__), |
|
1602 | 1607 | ] |
|
1603 | 1608 | |
|
1604 | 1609 | ismagic = False |
|
1605 | 1610 | isalias = False |
|
1606 | 1611 | found = False |
|
1607 | 1612 | ospace = None |
|
1608 | 1613 | parent = None |
|
1609 | 1614 | obj = None |
|
1610 | 1615 | |
|
1611 | 1616 | |
|
1612 | 1617 | # Look for the given name by splitting it in parts. If the head is |
|
1613 | 1618 | # found, then we look for all the remaining parts as members, and only |
|
1614 | 1619 | # declare success if we can find them all. |
|
1615 | 1620 | oname_parts = parts |
|
1616 | 1621 | oname_head, oname_rest = oname_parts[0],oname_parts[1:] |
|
1617 | 1622 | for nsname,ns in namespaces: |
|
1618 | 1623 | try: |
|
1619 | 1624 | obj = ns[oname_head] |
|
1620 | 1625 | except KeyError: |
|
1621 | 1626 | continue |
|
1622 | 1627 | else: |
|
1623 | 1628 | for idx, part in enumerate(oname_rest): |
|
1624 | 1629 | try: |
|
1625 | 1630 | parent = obj |
|
1626 | 1631 | # The last part is looked up in a special way to avoid |
|
1627 | 1632 | # descriptor invocation as it may raise or have side |
|
1628 | 1633 | # effects. |
|
1629 | 1634 | if idx == len(oname_rest) - 1: |
|
1630 | 1635 | obj = self._getattr_property(obj, part) |
|
1631 | 1636 | else: |
|
1632 | 1637 | if is_integer_string(part): |
|
1633 | 1638 | obj = obj[int(part)] |
|
1634 | 1639 | else: |
|
1635 | 1640 | obj = getattr(obj, part) |
|
1636 | 1641 | except: |
|
1637 | 1642 | # Blanket except b/c some badly implemented objects |
|
1638 | 1643 | # allow __getattr__ to raise exceptions other than |
|
1639 | 1644 | # AttributeError, which then crashes IPython. |
|
1640 | 1645 | break |
|
1641 | 1646 | else: |
|
1642 | 1647 | # If we finish the for loop (no break), we got all members |
|
1643 | 1648 | found = True |
|
1644 | 1649 | ospace = nsname |
|
1645 | 1650 | break # namespace loop |
|
1646 | 1651 | |
|
1647 | 1652 | # Try to see if it's magic |
|
1648 | 1653 | if not found: |
|
1649 | 1654 | obj = None |
|
1650 | 1655 | if oname.startswith(ESC_MAGIC2): |
|
1651 | 1656 | oname = oname.lstrip(ESC_MAGIC2) |
|
1652 | 1657 | obj = self.find_cell_magic(oname) |
|
1653 | 1658 | elif oname.startswith(ESC_MAGIC): |
|
1654 | 1659 | oname = oname.lstrip(ESC_MAGIC) |
|
1655 | 1660 | obj = self.find_line_magic(oname) |
|
1656 | 1661 | else: |
|
1657 | 1662 | # search without prefix, so run? will find %run? |
|
1658 | 1663 | obj = self.find_line_magic(oname) |
|
1659 | 1664 | if obj is None: |
|
1660 | 1665 | obj = self.find_cell_magic(oname) |
|
1661 | 1666 | if obj is not None: |
|
1662 | 1667 | found = True |
|
1663 | 1668 | ospace = 'IPython internal' |
|
1664 | 1669 | ismagic = True |
|
1665 | 1670 | isalias = isinstance(obj, Alias) |
|
1666 | 1671 | |
|
1667 | 1672 | # Last try: special-case some literals like '', [], {}, etc: |
|
1668 | 1673 | if not found and oname_head in ["''",'""','[]','{}','()']: |
|
1669 | 1674 | obj = eval(oname_head) |
|
1670 | 1675 | found = True |
|
1671 | 1676 | ospace = 'Interactive' |
|
1672 | 1677 | |
|
1673 | 1678 | return { |
|
1674 | 1679 | 'obj':obj, |
|
1675 | 1680 | 'found':found, |
|
1676 | 1681 | 'parent':parent, |
|
1677 | 1682 | 'ismagic':ismagic, |
|
1678 | 1683 | 'isalias':isalias, |
|
1679 | 1684 | 'namespace':ospace |
|
1680 | 1685 | } |
|
1681 | 1686 | |
|
1682 | 1687 | @staticmethod |
|
1683 | 1688 | def _getattr_property(obj, attrname): |
|
1684 | 1689 | """Property-aware getattr to use in object finding. |
|
1685 | 1690 | |
|
1686 | 1691 | If attrname represents a property, return it unevaluated (in case it has |
|
1687 | 1692 |         side effects or raises an error).
|
1688 | 1693 | |
|
1689 | 1694 | """ |
|
1690 | 1695 | if not isinstance(obj, type): |
|
1691 | 1696 | try: |
|
1692 | 1697 | # `getattr(type(obj), attrname)` is not guaranteed to return |
|
1693 | 1698 | # `obj`, but does so for property: |
|
1694 | 1699 | # |
|
1695 | 1700 | # property.__get__(self, None, cls) -> self |
|
1696 | 1701 | # |
|
1697 | 1702 | # The universal alternative is to traverse the mro manually |
|
1698 | 1703 | # searching for attrname in class dicts. |
|
1699 | 1704 | if is_integer_string(attrname): |
|
1700 | 1705 | return obj[int(attrname)] |
|
1701 | 1706 | else: |
|
1702 | 1707 | attr = getattr(type(obj), attrname) |
|
1703 | 1708 | except AttributeError: |
|
1704 | 1709 | pass |
|
1705 | 1710 | else: |
|
1706 | 1711 | # This relies on the fact that data descriptors (with both |
|
1707 | 1712 | # __get__ & __set__ magic methods) take precedence over |
|
1708 | 1713 | # instance-level attributes: |
|
1709 | 1714 | # |
|
1710 | 1715 | # class A(object): |
|
1711 | 1716 | # @property |
|
1712 | 1717 | # def foobar(self): return 123 |
|
1713 | 1718 | # a = A() |
|
1714 | 1719 | # a.__dict__['foobar'] = 345 |
|
1715 | 1720 | # a.foobar # == 123 |
|
1716 | 1721 | # |
|
1717 | 1722 | # So, a property may be returned right away. |
|
1718 | 1723 | if isinstance(attr, property): |
|
1719 | 1724 | return attr |
|
1720 | 1725 | |
|
1721 | 1726 | # Nothing helped, fall back. |
|
1722 | 1727 | return getattr(obj, attrname) |
|
1723 | 1728 | |
|
1724 | 1729 | def _object_find(self, oname, namespaces=None): |
|
1725 | 1730 | """Find an object and return a struct with info about it.""" |
|
1726 | 1731 | return Struct(self._ofind(oname, namespaces)) |
|
1727 | 1732 | |
|
1728 | 1733 | def _inspect(self, meth, oname, namespaces=None, **kw): |
|
1729 | 1734 | """Generic interface to the inspector system. |
|
1730 | 1735 | |
|
1731 | 1736 | This function is meant to be called by pdef, pdoc & friends. |
|
1732 | 1737 | """ |
|
1733 | 1738 | info = self._object_find(oname, namespaces) |
|
1734 | 1739 | docformat = ( |
|
1735 | 1740 | sphinxify(self.object_inspect(oname)) if self.sphinxify_docstring else None |
|
1736 | 1741 | ) |
|
1737 | 1742 | if info.found: |
|
1738 | 1743 | pmethod = getattr(self.inspector, meth) |
|
1739 | 1744 | # TODO: only apply format_screen to the plain/text repr of the mime |
|
1740 | 1745 | # bundle. |
|
1741 | 1746 | formatter = format_screen if info.ismagic else docformat |
|
1742 | 1747 | if meth == 'pdoc': |
|
1743 | 1748 | pmethod(info.obj, oname, formatter) |
|
1744 | 1749 | elif meth == 'pinfo': |
|
1745 | 1750 | pmethod( |
|
1746 | 1751 | info.obj, |
|
1747 | 1752 | oname, |
|
1748 | 1753 | formatter, |
|
1749 | 1754 | info, |
|
1750 | 1755 | enable_html_pager=self.enable_html_pager, |
|
1751 | 1756 | **kw, |
|
1752 | 1757 | ) |
|
1753 | 1758 | else: |
|
1754 | 1759 | pmethod(info.obj, oname) |
|
1755 | 1760 | else: |
|
1756 | 1761 | print('Object `%s` not found.' % oname) |
|
1757 | 1762 | return 'not found' # so callers can take other action |
|
1758 | 1763 | |
|
1759 | 1764 | def object_inspect(self, oname, detail_level=0): |
|
1760 | 1765 | """Get object info about oname""" |
|
1761 | 1766 | with self.builtin_trap: |
|
1762 | 1767 | info = self._object_find(oname) |
|
1763 | 1768 | if info.found: |
|
1764 | 1769 | return self.inspector.info(info.obj, oname, info=info, |
|
1765 | 1770 | detail_level=detail_level |
|
1766 | 1771 | ) |
|
1767 | 1772 | else: |
|
1768 | 1773 | return oinspect.object_info(name=oname, found=False) |
|
1769 | 1774 | |
|
1770 | 1775 | def object_inspect_text(self, oname, detail_level=0): |
|
1771 | 1776 | """Get object info as formatted text""" |
|
1772 | 1777 | return self.object_inspect_mime(oname, detail_level)['text/plain'] |
|
1773 | 1778 | |
|
1774 | 1779 | def object_inspect_mime(self, oname, detail_level=0, omit_sections=()): |
|
1775 | 1780 | """Get object info as a mimebundle of formatted representations. |
|
1776 | 1781 | |
|
1777 | 1782 | A mimebundle is a dictionary, keyed by mime-type. |
|
1778 | 1783 | It must always have the key `'text/plain'`. |
|
1779 | 1784 | """ |
|
1780 | 1785 | with self.builtin_trap: |
|
1781 | 1786 | info = self._object_find(oname) |
|
1782 | 1787 | if info.found: |
|
1783 | 1788 | docformat = ( |
|
1784 | 1789 | sphinxify(self.object_inspect(oname)) |
|
1785 | 1790 | if self.sphinxify_docstring |
|
1786 | 1791 | else None |
|
1787 | 1792 | ) |
|
1788 | 1793 | return self.inspector._get_info( |
|
1789 | 1794 | info.obj, |
|
1790 | 1795 | oname, |
|
1791 | 1796 | info=info, |
|
1792 | 1797 | detail_level=detail_level, |
|
1793 | 1798 | formatter=docformat, |
|
1794 | 1799 | omit_sections=omit_sections, |
|
1795 | 1800 | ) |
|
1796 | 1801 | else: |
|
1797 | 1802 | raise KeyError(oname) |
|
1798 | 1803 | |
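A sketch of how a frontend might call these introspection entry points (assuming ``ip``); note that ``object_inspect_mime`` raises ``KeyError`` for unknown names, while ``object_inspect`` returns a dict-like info structure (assumed here to expose a ``found`` key, mirroring ``oinspect.object_info``)::

    bundle = ip.object_inspect_mime("len")
    print(bundle["text/plain"])        # roughly what ``len?`` would show

    try:
        ip.object_inspect_mime("no_such_name")
    except KeyError:
        pass                           # not found

    info = ip.object_inspect("no_such_name")
    assert not info["found"]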
|
1799 | 1804 | #------------------------------------------------------------------------- |
|
1800 | 1805 | # Things related to history management |
|
1801 | 1806 | #------------------------------------------------------------------------- |
|
1802 | 1807 | |
|
1803 | 1808 | def init_history(self): |
|
1804 | 1809 | """Sets up the command history, and starts regular autosaves.""" |
|
1805 | 1810 | self.history_manager = HistoryManager(shell=self, parent=self) |
|
1806 | 1811 | self.configurables.append(self.history_manager) |
|
1807 | 1812 | |
|
1808 | 1813 | #------------------------------------------------------------------------- |
|
1809 | 1814 | # Things related to exception handling and tracebacks (not debugging) |
|
1810 | 1815 | #------------------------------------------------------------------------- |
|
1811 | 1816 | |
|
1812 | 1817 | debugger_cls = InterruptiblePdb |
|
1813 | 1818 | |
|
1814 | 1819 | def init_traceback_handlers(self, custom_exceptions): |
|
1815 | 1820 | # Syntax error handler. |
|
1816 | 1821 | self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor', parent=self) |
|
1817 | 1822 | |
|
1818 | 1823 | # The interactive one is initialized with an offset, meaning we always |
|
1819 | 1824 | # want to remove the topmost item in the traceback, which is our own |
|
1820 | 1825 | # internal code. Valid modes: ['Plain','Context','Verbose','Minimal'] |
|
1821 | 1826 | self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain', |
|
1822 | 1827 | color_scheme='NoColor', |
|
1823 | 1828 | tb_offset = 1, |
|
1824 | 1829 | debugger_cls=self.debugger_cls, parent=self) |
|
1825 | 1830 | |
|
1826 | 1831 | # The instance will store a pointer to the system-wide exception hook, |
|
1827 | 1832 | # so that runtime code (such as magics) can access it. This is because |
|
1828 | 1833 | # during the read-eval loop, it may get temporarily overwritten. |
|
1829 | 1834 | self.sys_excepthook = sys.excepthook |
|
1830 | 1835 | |
|
1831 | 1836 | # and add any custom exception handlers the user may have specified |
|
1832 | 1837 | self.set_custom_exc(*custom_exceptions) |
|
1833 | 1838 | |
|
1834 | 1839 | # Set the exception mode |
|
1835 | 1840 | self.InteractiveTB.set_mode(mode=self.xmode) |
|
1836 | 1841 | |
|
1837 | 1842 | def set_custom_exc(self, exc_tuple, handler): |
|
1838 | 1843 | """set_custom_exc(exc_tuple, handler) |
|
1839 | 1844 | |
|
1840 | 1845 | Set a custom exception handler, which will be called if any of the |
|
1841 | 1846 | exceptions in exc_tuple occur in the mainloop (specifically, in the |
|
1842 | 1847 | run_code() method). |
|
1843 | 1848 | |
|
1844 | 1849 | Parameters |
|
1845 | 1850 | ---------- |
|
1846 | 1851 | exc_tuple : tuple of exception classes |
|
1847 | 1852 | A *tuple* of exception classes, for which to call the defined |
|
1848 | 1853 | handler. It is very important that you use a tuple, and NOT A |
|
1849 | 1854 | LIST here, because of the way Python's except statement works. If |
|
1850 | 1855 | you only want to trap a single exception, use a singleton tuple:: |
|
1851 | 1856 | |
|
1852 | 1857 | exc_tuple == (MyCustomException,) |
|
1853 | 1858 | |
|
1854 | 1859 | handler : callable |
|
1855 | 1860 | handler must have the following signature:: |
|
1856 | 1861 | |
|
1857 | 1862 | def my_handler(self, etype, value, tb, tb_offset=None): |
|
1858 | 1863 | ... |
|
1859 | 1864 | return structured_traceback |
|
1860 | 1865 | |
|
1861 | 1866 | Your handler must return a structured traceback (a list of strings), |
|
1862 | 1867 | or None. |
|
1863 | 1868 | |
|
1864 | 1869 | This will be made into an instance method (via types.MethodType) |
|
1865 | 1870 | of IPython itself, and it will be called if any of the exceptions |
|
1866 | 1871 | listed in the exc_tuple are caught. If the handler is None, an |
|
1867 | 1872 | internal basic one is used, which just prints basic info. |
|
1868 | 1873 | |
|
1869 | 1874 | To protect IPython from crashes, if your handler ever raises an |
|
1870 | 1875 | exception or returns an invalid result, it will be immediately |
|
1871 | 1876 | disabled. |
|
1872 | 1877 | |
|
1873 | 1878 | Notes |
|
1874 | 1879 | ----- |
|
1875 | 1880 | WARNING: by putting in your own exception handler into IPython's main |
|
1876 | 1881 | execution loop, you run a very good chance of nasty crashes. This |
|
1877 | 1882 | facility should only be used if you really know what you are doing. |
|
1878 | 1883 | """ |
|
1879 | 1884 | |
|
1880 | 1885 | if not isinstance(exc_tuple, tuple): |
|
1881 | 1886 | raise TypeError("The custom exceptions must be given as a tuple.") |
|
1882 | 1887 | |
|
1883 | 1888 | def dummy_handler(self, etype, value, tb, tb_offset=None): |
|
1884 | 1889 | print('*** Simple custom exception handler ***') |
|
1885 | 1890 | print('Exception type :', etype) |
|
1886 | 1891 | print('Exception value:', value) |
|
1887 | 1892 | print('Traceback :', tb) |
|
1888 | 1893 | |
|
1889 | 1894 | def validate_stb(stb): |
|
1890 | 1895 | """validate structured traceback return type |
|
1891 | 1896 | |
|
1892 | 1897 | return type of CustomTB *should* be a list of strings, but allow |
|
1893 | 1898 | single strings or None, which are harmless. |
|
1894 | 1899 | |
|
1895 | 1900 | This function will *always* return a list of strings, |
|
1896 | 1901 | and will raise a TypeError if stb is inappropriate. |
|
1897 | 1902 | """ |
|
1898 | 1903 | msg = "CustomTB must return list of strings, not %r" % stb |
|
1899 | 1904 | if stb is None: |
|
1900 | 1905 | return [] |
|
1901 | 1906 | elif isinstance(stb, str): |
|
1902 | 1907 | return [stb] |
|
1903 | 1908 | elif not isinstance(stb, list): |
|
1904 | 1909 | raise TypeError(msg) |
|
1905 | 1910 | # it's a list |
|
1906 | 1911 | for line in stb: |
|
1907 | 1912 | # check every element |
|
1908 | 1913 | if not isinstance(line, str): |
|
1909 | 1914 | raise TypeError(msg) |
|
1910 | 1915 | return stb |
|
1911 | 1916 | |
|
1912 | 1917 | if handler is None: |
|
1913 | 1918 | wrapped = dummy_handler |
|
1914 | 1919 | else: |
|
1915 | 1920 | def wrapped(self,etype,value,tb,tb_offset=None): |
|
1916 | 1921 | """wrap CustomTB handler, to protect IPython from user code |
|
1917 | 1922 | |
|
1918 | 1923 | This makes it harder (but not impossible) for custom exception |
|
1919 | 1924 | handlers to crash IPython. |
|
1920 | 1925 | """ |
|
1921 | 1926 | try: |
|
1922 | 1927 | stb = handler(self,etype,value,tb,tb_offset=tb_offset) |
|
1923 | 1928 | return validate_stb(stb) |
|
1924 | 1929 | except: |
|
1925 | 1930 | # clear custom handler immediately |
|
1926 | 1931 | self.set_custom_exc((), None) |
|
1927 | 1932 | print("Custom TB Handler failed, unregistering", file=sys.stderr) |
|
1928 | 1933 | # show the exception in handler first |
|
1929 | 1934 | stb = self.InteractiveTB.structured_traceback(*sys.exc_info()) |
|
1930 | 1935 | print(self.InteractiveTB.stb2text(stb)) |
|
1931 | 1936 | print("The original exception:") |
|
1932 | 1937 | stb = self.InteractiveTB.structured_traceback( |
|
1933 | 1938 | (etype,value,tb), tb_offset=tb_offset |
|
1934 | 1939 | ) |
|
1935 | 1940 | return stb |
|
1936 | 1941 | |
|
1937 | 1942 | self.CustomTB = types.MethodType(wrapped,self) |
|
1938 | 1943 | self.custom_exceptions = exc_tuple |
|
1939 | 1944 | |
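A minimal handler sketch (assuming ``ip``): returning a list of strings satisfies ``validate_stb``, and any exception raised inside the handler unregisters it, as described above::

    class MyError(Exception):
        pass

    def my_handler(self, etype, value, tb, tb_offset=None):
        # `self` is the shell; return a structured traceback (list of str)
        return ["MyError intercepted: %s" % value]

    ip.set_custom_exc((MyError,), my_handler)
    # Raising MyError in a cell now prints the single line above;
    # ip.set_custom_exc((), None) restores the default behaviour.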
|
1940 | 1945 | def excepthook(self, etype, value, tb): |
|
1941 | 1946 | """One more defense for GUI apps that call sys.excepthook. |
|
1942 | 1947 | |
|
1943 | 1948 | GUI frameworks like wxPython trap exceptions and call |
|
1944 | 1949 | sys.excepthook themselves. I guess this is a feature that |
|
1945 | 1950 | enables them to keep running after exceptions that would |
|
1946 | 1951 | otherwise kill their mainloop. This is a bother for IPython |
|
1947 | 1952 | which expects to catch all of the program exceptions with a try: |
|
1948 | 1953 | except: statement. |
|
1949 | 1954 | |
|
1950 | 1955 | Normally, IPython sets sys.excepthook to a CrashHandler instance, so if |
|
1951 | 1956 | any app directly invokes sys.excepthook, it will look to the user like |
|
1952 | 1957 | IPython crashed. In order to work around this, we can disable the |
|
1953 | 1958 | CrashHandler and replace it with this excepthook instead, which prints a |
|
1954 | 1959 | regular traceback using our InteractiveTB. In this fashion, apps which |
|
1955 | 1960 | call sys.excepthook will generate a regular-looking exception from |
|
1956 | 1961 | IPython, and the CrashHandler will only be triggered by real IPython |
|
1957 | 1962 | crashes. |
|
1958 | 1963 | |
|
1959 | 1964 | This hook should be used sparingly, only in places which are not likely |
|
1960 | 1965 | to be true IPython errors. |
|
1961 | 1966 | """ |
|
1962 | 1967 | self.showtraceback((etype, value, tb), tb_offset=0) |
|
1963 | 1968 | |
|
1964 | 1969 | def _get_exc_info(self, exc_tuple=None): |
|
1965 | 1970 | """get exc_info from a given tuple, sys.exc_info() or sys.last_type etc. |
|
1966 | 1971 | |
|
1967 | 1972 | Ensures sys.last_type,value,traceback hold the exc_info we found, |
|
1968 | 1973 | from whichever source. |
|
1969 | 1974 | |
|
1970 | 1975 | raises ValueError if none of these contain any information |
|
1971 | 1976 | """ |
|
1972 | 1977 | if exc_tuple is None: |
|
1973 | 1978 | etype, value, tb = sys.exc_info() |
|
1974 | 1979 | else: |
|
1975 | 1980 | etype, value, tb = exc_tuple |
|
1976 | 1981 | |
|
1977 | 1982 | if etype is None: |
|
1978 | 1983 | if hasattr(sys, 'last_type'): |
|
1979 | 1984 | etype, value, tb = sys.last_type, sys.last_value, \ |
|
1980 | 1985 | sys.last_traceback |
|
1981 | 1986 | |
|
1982 | 1987 | if etype is None: |
|
1983 | 1988 | raise ValueError("No exception to find") |
|
1984 | 1989 | |
|
1985 | 1990 | # Now store the exception info in sys.last_type etc. |
|
1986 | 1991 | # WARNING: these variables are somewhat deprecated and not |
|
1987 | 1992 | # necessarily safe to use in a threaded environment, but tools |
|
1988 | 1993 | # like pdb depend on their existence, so let's set them. If we |
|
1989 | 1994 | # find problems in the field, we'll need to revisit their use. |
|
1990 | 1995 | sys.last_type = etype |
|
1991 | 1996 | sys.last_value = value |
|
1992 | 1997 | sys.last_traceback = tb |
|
1993 | 1998 | |
|
1994 | 1999 | return etype, value, tb |
|
1995 | 2000 | |
|
1996 | 2001 | def show_usage_error(self, exc): |
|
1997 | 2002 | """Show a short message for UsageErrors |
|
1998 | 2003 | |
|
1999 | 2004 | These are special exceptions that shouldn't show a traceback. |
|
2000 | 2005 | """ |
|
2001 | 2006 | print("UsageError: %s" % exc, file=sys.stderr) |
|
2002 | 2007 | |
|
2003 | 2008 | def get_exception_only(self, exc_tuple=None): |
|
2004 | 2009 | """ |
|
2005 | 2010 | Return as a string (ending with a newline) the exception that |
|
2006 | 2011 | just occurred, without any traceback. |
|
2007 | 2012 | """ |
|
2008 | 2013 | etype, value, tb = self._get_exc_info(exc_tuple) |
|
2009 | 2014 | msg = traceback.format_exception_only(etype, value) |
|
2010 | 2015 | return ''.join(msg) |
|
2011 | 2016 | |
|
2012 | 2017 | def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None, |
|
2013 | 2018 | exception_only=False, running_compiled_code=False): |
|
2014 | 2019 | """Display the exception that just occurred. |
|
2015 | 2020 | |
|
2016 | 2021 | If nothing is known about the exception, this is the method which |
|
2017 | 2022 | should be used throughout the code for presenting user tracebacks, |
|
2018 | 2023 | rather than directly invoking the InteractiveTB object. |
|
2019 | 2024 | |
|
2020 | 2025 | A specific showsyntaxerror() also exists, but this method can take |
|
2021 | 2026 | care of calling it if needed, so unless you are explicitly catching a |
|
2022 | 2027 | SyntaxError exception, don't try to analyze the stack manually and |
|
2023 | 2028 | simply call this method.""" |
|
2024 | 2029 | |
|
2025 | 2030 | try: |
|
2026 | 2031 | try: |
|
2027 | 2032 | etype, value, tb = self._get_exc_info(exc_tuple) |
|
2028 | 2033 | except ValueError: |
|
2029 | 2034 | print('No traceback available to show.', file=sys.stderr) |
|
2030 | 2035 | return |
|
2031 | 2036 | |
|
2032 | 2037 | if issubclass(etype, SyntaxError): |
|
2033 | 2038 | # Though this won't be called by syntax errors in the input |
|
2034 | 2039 | # line, there may be SyntaxError cases with imported code. |
|
2035 | 2040 | self.showsyntaxerror(filename, running_compiled_code) |
|
2036 | 2041 | elif etype is UsageError: |
|
2037 | 2042 | self.show_usage_error(value) |
|
2038 | 2043 | else: |
|
2039 | 2044 | if exception_only: |
|
2040 | 2045 | stb = ['An exception has occurred, use %tb to see ' |
|
2041 | 2046 | 'the full traceback.\n'] |
|
2042 | 2047 | stb.extend(self.InteractiveTB.get_exception_only(etype, |
|
2043 | 2048 | value)) |
|
2044 | 2049 | else: |
|
2045 | 2050 | try: |
|
2046 | 2051 | # Exception classes can customise their traceback - we |
|
2047 | 2052 | # use this in IPython.parallel for exceptions occurring |
|
2048 | 2053 | # in the engines. This should return a list of strings. |
|
2049 | 2054 | if hasattr(value, "_render_traceback_"): |
|
2050 | 2055 | stb = value._render_traceback_() |
|
2051 | 2056 | else: |
|
2052 | 2057 | stb = self.InteractiveTB.structured_traceback( |
|
2053 | 2058 | etype, value, tb, tb_offset=tb_offset |
|
2054 | 2059 | ) |
|
2055 | 2060 | |
|
2056 | 2061 | except Exception: |
|
2057 | 2062 | print( |
|
2058 | 2063 | "Unexpected exception formatting exception. Falling back to standard exception" |
|
2059 | 2064 | ) |
|
2060 | 2065 | traceback.print_exc() |
|
2061 | 2066 | return None |
|
2062 | 2067 | |
|
2063 | 2068 | self._showtraceback(etype, value, stb) |
|
2064 | 2069 | if self.call_pdb: |
|
2065 | 2070 | # drop into debugger |
|
2066 | 2071 | self.debugger(force=True) |
|
2067 | 2072 | return |
|
2068 | 2073 | |
|
2069 | 2074 | # Actually show the traceback |
|
2070 | 2075 | self._showtraceback(etype, value, stb) |
|
2071 | 2076 | |
|
2072 | 2077 | except KeyboardInterrupt: |
|
2073 | 2078 | print('\n' + self.get_exception_only(), file=sys.stderr) |
|
2074 | 2079 | |
|
2075 | 2080 |     def _showtraceback(self, etype, evalue, stb: list):
|
2076 | 2081 | """Actually show a traceback. |
|
2077 | 2082 | |
|
2078 | 2083 | Subclasses may override this method to put the traceback on a different |
|
2079 | 2084 | place, like a side channel. |
|
2080 | 2085 | """ |
|
2081 | 2086 | val = self.InteractiveTB.stb2text(stb) |
|
2082 | 2087 | try: |
|
2083 | 2088 | print(val) |
|
2084 | 2089 | except UnicodeEncodeError: |
|
2085 | 2090 | print(val.encode("utf-8", "backslashreplace").decode()) |
|
2086 | 2091 | |
|
2087 | 2092 | def showsyntaxerror(self, filename=None, running_compiled_code=False): |
|
2088 | 2093 | """Display the syntax error that just occurred. |
|
2089 | 2094 | |
|
2090 | 2095 | This doesn't display a stack trace because there isn't one. |
|
2091 | 2096 | |
|
2092 | 2097 | If a filename is given, it is stuffed in the exception instead |
|
2093 | 2098 | of what was there before (because Python's parser always uses |
|
2094 | 2099 | "<string>" when reading from a string). |
|
2095 | 2100 | |
|
2096 | 2101 |         If the syntax error occurred when running compiled code (i.e. running_compiled_code=True),

2097 | 2102 |         a longer stack trace will be displayed.
|
2098 | 2103 | """ |
|
2099 | 2104 | etype, value, last_traceback = self._get_exc_info() |
|
2100 | 2105 | |
|
2101 | 2106 | if filename and issubclass(etype, SyntaxError): |
|
2102 | 2107 | try: |
|
2103 | 2108 | value.filename = filename |
|
2104 | 2109 | except: |
|
2105 | 2110 | # Not the format we expect; leave it alone |
|
2106 | 2111 | pass |
|
2107 | 2112 | |
|
2108 | 2113 | # If the error occurred when executing compiled code, we should provide full stacktrace. |
|
2109 | 2114 | elist = traceback.extract_tb(last_traceback) if running_compiled_code else [] |
|
2110 | 2115 | stb = self.SyntaxTB.structured_traceback(etype, value, elist) |
|
2111 | 2116 | self._showtraceback(etype, value, stb) |
|
2112 | 2117 | |
|
2113 | 2118 | # This is overridden in TerminalInteractiveShell to show a message about |
|
2114 | 2119 | # the %paste magic. |
|
2115 | 2120 | def showindentationerror(self): |
|
2116 | 2121 | """Called by _run_cell when there's an IndentationError in code entered |
|
2117 | 2122 | at the prompt. |
|
2118 | 2123 | |
|
2119 | 2124 | This is overridden in TerminalInteractiveShell to show a message about |
|
2120 | 2125 | the %paste magic.""" |
|
2121 | 2126 | self.showsyntaxerror() |
|
2122 | 2127 | |
|
2123 | 2128 | @skip_doctest |
|
2124 | 2129 | def set_next_input(self, s, replace=False): |
|
2125 | 2130 | """ Sets the 'default' input string for the next command line. |
|
2126 | 2131 | |
|
2127 | 2132 | Example:: |
|
2128 | 2133 | |
|
2129 | 2134 |             In [1]: _ip.set_next_input("Hello World")

2130 | 2135 |             In [2]: Hello World_ # cursor is here
|
2131 | 2136 | """ |
|
2132 | 2137 | self.rl_next_input = s |
|
2133 | 2138 | |
|
2134 | 2139 | def _indent_current_str(self): |
|
2135 | 2140 | """return the current level of indentation as a string""" |
|
2136 | 2141 | return self.input_splitter.get_indent_spaces() * ' ' |
|
2137 | 2142 | |
|
2138 | 2143 | #------------------------------------------------------------------------- |
|
2139 | 2144 | # Things related to text completion |
|
2140 | 2145 | #------------------------------------------------------------------------- |
|
2141 | 2146 | |
|
2142 | 2147 | def init_completer(self): |
|
2143 | 2148 | """Initialize the completion machinery. |
|
2144 | 2149 | |
|
2145 | 2150 | This creates completion machinery that can be used by client code, |
|
2146 | 2151 | either interactively in-process (typically triggered by the readline |
|
2147 | 2152 | library), programmatically (such as in test suites) or out-of-process |
|
2148 | 2153 | (typically over the network by remote frontends). |
|
2149 | 2154 | """ |
|
2150 | 2155 | from IPython.core.completer import IPCompleter |
|
2151 | 2156 | from IPython.core.completerlib import ( |
|
2152 | 2157 | cd_completer, |
|
2153 | 2158 | magic_run_completer, |
|
2154 | 2159 | module_completer, |
|
2155 | 2160 | reset_completer, |
|
2156 | 2161 | ) |
|
2157 | 2162 | |
|
2158 | 2163 | self.Completer = IPCompleter(shell=self, |
|
2159 | 2164 | namespace=self.user_ns, |
|
2160 | 2165 | global_namespace=self.user_global_ns, |
|
2161 | 2166 | parent=self, |
|
2162 | 2167 | ) |
|
2163 | 2168 | self.configurables.append(self.Completer) |
|
2164 | 2169 | |
|
2165 | 2170 | # Add custom completers to the basic ones built into IPCompleter |
|
2166 | 2171 | sdisp = self.strdispatchers.get('complete_command', StrDispatch()) |
|
2167 | 2172 | self.strdispatchers['complete_command'] = sdisp |
|
2168 | 2173 | self.Completer.custom_completers = sdisp |
|
2169 | 2174 | |
|
2170 | 2175 | self.set_hook('complete_command', module_completer, str_key = 'import') |
|
2171 | 2176 | self.set_hook('complete_command', module_completer, str_key = 'from') |
|
2172 | 2177 | self.set_hook('complete_command', module_completer, str_key = '%aimport') |
|
2173 | 2178 | self.set_hook('complete_command', magic_run_completer, str_key = '%run') |
|
2174 | 2179 | self.set_hook('complete_command', cd_completer, str_key = '%cd') |
|
2175 | 2180 | self.set_hook('complete_command', reset_completer, str_key = '%reset') |
|
2176 | 2181 | |
|
2177 | 2182 | @skip_doctest |
|
2178 | 2183 | def complete(self, text, line=None, cursor_pos=None): |
|
2179 | 2184 | """Return the completed text and a list of completions. |
|
2180 | 2185 | |
|
2181 | 2186 | Parameters |
|
2182 | 2187 | ---------- |
|
2183 | 2188 | text : string |
|
2184 | 2189 | A string of text to be completed on. It can be given as empty, with

2185 | 2190 | a line/cursor-position pair given instead. In this case, the
|
2186 | 2191 | completer itself will split the line like readline does. |
|
2187 | 2192 | line : string, optional |
|
2188 | 2193 | The complete line that text is part of. |
|
2189 | 2194 | cursor_pos : int, optional |
|
2190 | 2195 | The position of the cursor on the input line. |
|
2191 | 2196 | |
|
2192 | 2197 | Returns |
|
2193 | 2198 | ------- |
|
2194 | 2199 | text : string |
|
2195 | 2200 | The actual text that was completed. |
|
2196 | 2201 | matches : list |
|
2197 | 2202 | A sorted list with all possible completions. |
|
2198 | 2203 | |
|
2199 | 2204 | Notes |
|
2200 | 2205 | ----- |
|
2201 | 2206 | The optional arguments allow the completion to take more context into |
|
2202 | 2207 | account, and are part of the low-level completion API. |
|
2203 | 2208 | |
|
2204 | 2209 | This is a wrapper around the completion mechanism, similar to what |
|
2205 | 2210 | readline does at the command line when the TAB key is hit. By |
|
2206 | 2211 | exposing it as a method, it can be used by other non-readline |
|
2207 | 2212 | environments (such as GUIs) for text completion. |
|
2208 | 2213 | |
|
2209 | 2214 | Examples |
|
2210 | 2215 | -------- |
|
2211 | 2216 | In [1]: x = 'hello' |
|
2212 | 2217 | |
|
2213 | 2218 | In [2]: _ip.complete('x.l') |
|
2214 | 2219 | Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip']) |
|
2215 | 2220 | """ |
|
2216 | 2221 | |
|
2217 | 2222 | # Inject names into __builtin__ so we can complete on the added names. |
|
2218 | 2223 | with self.builtin_trap: |
|
2219 | 2224 | return self.Completer.complete(text, line, cursor_pos) |
|
2220 | 2225 | |
|
2221 | 2226 | def set_custom_completer(self, completer, pos=0) -> None: |
|
2222 | 2227 | """Adds a new custom completer function. |
|
2223 | 2228 | |
|
2224 | 2229 | The position argument (defaults to 0) is the index in the completers |
|
2225 | 2230 | list where you want the completer to be inserted. |
|
2226 | 2231 | |
|
2227 | 2232 | `completer` should have the following signature:: |
|
2228 | 2233 | |
|
2229 | 2234 | def completion(self: Completer, text: str) -> List[str]:
|
2230 | 2235 | raise NotImplementedError |
|
2231 | 2236 | |
|
2232 | 2237 | It will be bound to the current Completer instance, receive some text,

2233 | 2238 | and return a list of completions to suggest to the user.
|
2234 | 2239 | """ |
|
2235 | 2240 | |
|
2236 | 2241 | newcomp = types.MethodType(completer, self.Completer) |
|
2237 | 2242 | self.Completer.custom_matchers.insert(pos,newcomp) |
|
2238 | 2243 | |
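As a minimal sketch of the API described in the docstring above (editor's illustration, not part of the diff; the completer name and word list are made up), registering a custom completer could look like this:

def keyword_completer(self, text):
    # `self` is the IPCompleter instance this function gets bound to
    return [w for w in ("plot", "plot3d", "profile") if w.startswith(text)]

ip = get_ipython()                          # assumes a running IPython shell
ip.set_custom_completer(keyword_completer)  # inserted at position 0 by default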
|
2239 | 2244 | def set_completer_frame(self, frame=None): |
|
2240 | 2245 | """Set the frame of the completer.""" |
|
2241 | 2246 | if frame: |
|
2242 | 2247 | self.Completer.namespace = frame.f_locals |
|
2243 | 2248 | self.Completer.global_namespace = frame.f_globals |
|
2244 | 2249 | else: |
|
2245 | 2250 | self.Completer.namespace = self.user_ns |
|
2246 | 2251 | self.Completer.global_namespace = self.user_global_ns |
|
2247 | 2252 | |
|
2248 | 2253 | #------------------------------------------------------------------------- |
|
2249 | 2254 | # Things related to magics |
|
2250 | 2255 | #------------------------------------------------------------------------- |
|
2251 | 2256 | |
|
2252 | 2257 | def init_magics(self): |
|
2253 | 2258 | from IPython.core import magics as m |
|
2254 | 2259 | self.magics_manager = magic.MagicsManager(shell=self, |
|
2255 | 2260 | parent=self, |
|
2256 | 2261 | user_magics=m.UserMagics(self)) |
|
2257 | 2262 | self.configurables.append(self.magics_manager) |
|
2258 | 2263 | |
|
2259 | 2264 | # Expose as public API from the magics manager |
|
2260 | 2265 | self.register_magics = self.magics_manager.register |
|
2261 | 2266 | |
|
2262 | 2267 | self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics, |
|
2263 | 2268 | m.ConfigMagics, m.DisplayMagics, m.ExecutionMagics, |
|
2264 | 2269 | m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics, |
|
2265 | 2270 | m.NamespaceMagics, m.OSMagics, m.PackagingMagics, |
|
2266 | 2271 | m.PylabMagics, m.ScriptMagics, |
|
2267 | 2272 | ) |
|
2268 | 2273 | self.register_magics(m.AsyncMagics) |
|
2269 | 2274 | |
|
2270 | 2275 | # Register Magic Aliases |
|
2271 | 2276 | mman = self.magics_manager |
|
2272 | 2277 | # FIXME: magic aliases should be defined by the Magics classes |
|
2273 | 2278 | # or in MagicsManager, not here |
|
2274 | 2279 | mman.register_alias('ed', 'edit') |
|
2275 | 2280 | mman.register_alias('hist', 'history') |
|
2276 | 2281 | mman.register_alias('rep', 'recall') |
|
2277 | 2282 | mman.register_alias('SVG', 'svg', 'cell') |
|
2278 | 2283 | mman.register_alias('HTML', 'html', 'cell') |
|
2279 | 2284 | mman.register_alias('file', 'writefile', 'cell') |
|
2280 | 2285 | |
|
2281 | 2286 | # FIXME: Move the color initialization to the DisplayHook, which |
|
2282 | 2287 | # should be split into a prompt manager and displayhook. We probably |
|
2283 | 2288 | # even need a centralized colors management object.
|
2284 | 2289 | self.run_line_magic('colors', self.colors) |
|
2285 | 2290 | |
|
2286 | 2291 | # Defined here so that it's included in the documentation |
|
2287 | 2292 | @functools.wraps(magic.MagicsManager.register_function) |
|
2288 | 2293 | def register_magic_function(self, func, magic_kind='line', magic_name=None): |
|
2289 | 2294 | self.magics_manager.register_function( |
|
2290 | 2295 | func, magic_kind=magic_kind, magic_name=magic_name |
|
2291 | 2296 | ) |
|
2292 | 2297 | |
|
2293 | 2298 | def _find_with_lazy_load(self, /, type_, magic_name: str): |
|
2294 | 2299 | """ |
|
2295 | 2300 | Try to find a magic potentially lazy-loading it. |
|
2296 | 2301 | |
|
2297 | 2302 | Parameters |
|
2298 | 2303 | ---------- |
|
2299 | 2304 | |
|
2300 | 2305 | type_: "line"|"cell" |
|
2301 | 2306 | the type of magics we are trying to find/lazy load. |
|
2302 | 2307 | magic_name: str |
|
2303 | 2308 | The name of the magic we are trying to find/lazy load |
|
2304 | 2309 | |
|
2305 | 2310 | |
|
2306 | 2311 | Note that this may have side effects (it can load an extension).
|
2307 | 2312 | """ |
|
2308 | 2313 | finder = {"line": self.find_line_magic, "cell": self.find_cell_magic}[type_] |
|
2309 | 2314 | fn = finder(magic_name) |
|
2310 | 2315 | if fn is not None: |
|
2311 | 2316 | return fn |
|
2312 | 2317 | lazy = self.magics_manager.lazy_magics.get(magic_name) |
|
2313 | 2318 | if lazy is None: |
|
2314 | 2319 | return None |
|
2315 | 2320 | |
|
2316 | 2321 | self.run_line_magic("load_ext", lazy) |
|
2317 | 2322 | res = finder(magic_name) |
|
2318 | 2323 | return res |
|
2319 | 2324 | |
|
2320 | 2325 | def run_line_magic(self, magic_name: str, line, _stack_depth=1): |
|
2321 | 2326 | """Execute the given line magic. |
|
2322 | 2327 | |
|
2323 | 2328 | Parameters |
|
2324 | 2329 | ---------- |
|
2325 | 2330 | magic_name : str |
|
2326 | 2331 | Name of the desired magic function, without '%' prefix. |
|
2327 | 2332 | line : str |
|
2328 | 2333 | The rest of the input line as a single string. |
|
2329 | 2334 | _stack_depth : int |
|
2330 | 2335 | If run_line_magic() is called from magic() then _stack_depth=2. |
|
2331 | 2336 | This is added to ensure backward compatibility for use of 'get_ipython().magic()' |
|
2332 | 2337 | """ |
|
2333 | 2338 | fn = self._find_with_lazy_load("line", magic_name) |
|
2334 | 2339 | if fn is None: |
|
2335 | 2340 | lazy = self.magics_manager.lazy_magics.get(magic_name) |
|
2336 | 2341 | if lazy: |
|
2337 | 2342 | self.run_line_magic("load_ext", lazy) |
|
2338 | 2343 | fn = self.find_line_magic(magic_name) |
|
2339 | 2344 | if fn is None: |
|
2340 | 2345 | cm = self.find_cell_magic(magic_name) |
|
2341 | 2346 | etpl = "Line magic function `%%%s` not found%s." |
|
2342 | 2347 | extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, ' |
|
2343 | 2348 | 'did you mean that instead?)' % magic_name ) |
|
2344 | 2349 | raise UsageError(etpl % (magic_name, extra)) |
|
2345 | 2350 | else: |
|
2346 | 2351 | # Note: this is the distance in the stack to the user's frame. |
|
2347 | 2352 | # This will need to be updated if the internal calling logic gets |
|
2348 | 2353 | # refactored, or else we'll be expanding the wrong variables. |
|
2349 | 2354 | |
|
2350 | 2355 | # Determine stack_depth depending on where run_line_magic() has been called |
|
2351 | 2356 | stack_depth = _stack_depth |
|
2352 | 2357 | if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False): |
|
2353 | 2358 | # magic has opted out of var_expand |
|
2354 | 2359 | magic_arg_s = line |
|
2355 | 2360 | else: |
|
2356 | 2361 | magic_arg_s = self.var_expand(line, stack_depth) |
|
2357 | 2362 | # Put magic args in a list so we can call with f(*a) syntax |
|
2358 | 2363 | args = [magic_arg_s] |
|
2359 | 2364 | kwargs = {} |
|
2360 | 2365 | # Grab local namespace if we need it: |
|
2361 | 2366 | if getattr(fn, "needs_local_scope", False): |
|
2362 | 2367 | kwargs['local_ns'] = self.get_local_scope(stack_depth) |
|
2363 | 2368 | with self.builtin_trap: |
|
2364 | 2369 | result = fn(*args, **kwargs) |
|
2365 | 2370 | |
|
2366 | 2371 | # The code below prevents the output from being displayed |
|
2367 | 2372 | # when using magics with the decorator @output_can_be_silenced
|
2368 | 2373 | # when the last Python token in the expression is a ';'. |
|
2369 | 2374 | if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False): |
|
2370 | 2375 | if DisplayHook.semicolon_at_end_of_expression(magic_arg_s): |
|
2371 | 2376 | return None |
|
2372 | 2377 | |
|
2373 | 2378 | return result |
|
2374 | 2379 | |
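A brief usage sketch (editor's illustration, not part of the diff) of calling a line magic programmatically, assuming a running shell:

ip = get_ipython()
ip.run_line_magic("timeit", "sum(range(10))")  # equivalent to typing %timeit sum(range(10))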
|
2375 | 2380 | def get_local_scope(self, stack_depth): |
|
2376 | 2381 | """Get local scope at given stack depth. |
|
2377 | 2382 | |
|
2378 | 2383 | Parameters |
|
2379 | 2384 | ---------- |
|
2380 | 2385 | stack_depth : int |
|
2381 | 2386 | Depth relative to calling frame |
|
2382 | 2387 | """ |
|
2383 | 2388 | return sys._getframe(stack_depth + 1).f_locals |
|
2384 | 2389 | |
|
2385 | 2390 | def run_cell_magic(self, magic_name, line, cell): |
|
2386 | 2391 | """Execute the given cell magic. |
|
2387 | 2392 | |
|
2388 | 2393 | Parameters |
|
2389 | 2394 | ---------- |
|
2390 | 2395 | magic_name : str |
|
2391 | 2396 | Name of the desired magic function, without '%' prefix. |
|
2392 | 2397 | line : str |
|
2393 | 2398 | The rest of the first input line as a single string. |
|
2394 | 2399 | cell : str |
|
2395 | 2400 | The body of the cell as a (possibly multiline) string. |
|
2396 | 2401 | """ |
|
2397 | 2402 | fn = self._find_with_lazy_load("cell", magic_name) |
|
2398 | 2403 | if fn is None: |
|
2399 | 2404 | lm = self.find_line_magic(magic_name) |
|
2400 | 2405 | etpl = "Cell magic `%%{0}` not found{1}." |
|
2401 | 2406 | extra = '' if lm is None else (' (But line magic `%{0}` exists, ' |
|
2402 | 2407 | 'did you mean that instead?)'.format(magic_name)) |
|
2403 | 2408 | raise UsageError(etpl.format(magic_name, extra)) |
|
2404 | 2409 | elif cell == '': |
|
2405 | 2410 | message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name) |
|
2406 | 2411 | if self.find_line_magic(magic_name) is not None: |
|
2407 | 2412 | message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name) |
|
2408 | 2413 | raise UsageError(message) |
|
2409 | 2414 | else: |
|
2410 | 2415 | # Note: this is the distance in the stack to the user's frame. |
|
2411 | 2416 | # This will need to be updated if the internal calling logic gets |
|
2412 | 2417 | # refactored, or else we'll be expanding the wrong variables. |
|
2413 | 2418 | stack_depth = 2 |
|
2414 | 2419 | if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False): |
|
2415 | 2420 | # magic has opted out of var_expand |
|
2416 | 2421 | magic_arg_s = line |
|
2417 | 2422 | else: |
|
2418 | 2423 | magic_arg_s = self.var_expand(line, stack_depth) |
|
2419 | 2424 | kwargs = {} |
|
2420 | 2425 | if getattr(fn, "needs_local_scope", False): |
|
2421 | 2426 | kwargs['local_ns'] = self.user_ns |
|
2422 | 2427 | |
|
2423 | 2428 | with self.builtin_trap: |
|
2424 | 2429 | args = (magic_arg_s, cell) |
|
2425 | 2430 | result = fn(*args, **kwargs) |
|
2426 | 2431 | |
|
2427 | 2432 | # The code below prevents the output from being displayed |
|
2428 | 2433 | # when using magics with the decorator @output_can_be_silenced
|
2429 | 2434 | # when the last Python token in the expression is a ';'. |
|
2430 | 2435 | if getattr(fn, magic.MAGIC_OUTPUT_CAN_BE_SILENCED, False): |
|
2431 | 2436 | if DisplayHook.semicolon_at_end_of_expression(cell): |
|
2432 | 2437 | return None |
|
2433 | 2438 | |
|
2434 | 2439 | return result |
|
2435 | 2440 | |
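The cell-magic counterpart takes the first line and the cell body as separate arguments; a small sketch (not part of the diff; the filename is hypothetical):

ip = get_ipython()
ip.run_cell_magic("writefile", "notes.txt", "first line\nsecond line\n")
# same effect as a %%writefile notes.txt cell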
|
2436 | 2441 | def find_line_magic(self, magic_name): |
|
2437 | 2442 | """Find and return a line magic by name. |
|
2438 | 2443 | |
|
2439 | 2444 | Returns None if the magic isn't found.""" |
|
2440 | 2445 | return self.magics_manager.magics['line'].get(magic_name) |
|
2441 | 2446 | |
|
2442 | 2447 | def find_cell_magic(self, magic_name): |
|
2443 | 2448 | """Find and return a cell magic by name. |
|
2444 | 2449 | |
|
2445 | 2450 | Returns None if the magic isn't found.""" |
|
2446 | 2451 | return self.magics_manager.magics['cell'].get(magic_name) |
|
2447 | 2452 | |
|
2448 | 2453 | def find_magic(self, magic_name, magic_kind='line'): |
|
2449 | 2454 | """Find and return a magic of the given type by name. |
|
2450 | 2455 | |
|
2451 | 2456 | Returns None if the magic isn't found.""" |
|
2452 | 2457 | return self.magics_manager.magics[magic_kind].get(magic_name) |
|
2453 | 2458 | |
|
2454 | 2459 | def magic(self, arg_s): |
|
2455 | 2460 | """ |
|
2456 | 2461 | DEPRECATED |
|
2457 | 2462 | |
|
2458 | 2463 | Deprecated since IPython 0.13 (warning added in |
|
2459 | 2464 | 8.1), use run_line_magic(magic_name, parameter_s). |
|
2460 | 2465 | |
|
2461 | 2466 | Call a magic function by name. |
|
2462 | 2467 | |
|
2463 | 2468 | Input: a string containing the name of the magic function to call and |
|
2464 | 2469 | any additional arguments to be passed to the magic. |
|
2465 | 2470 | |
|
2466 | 2471 | magic('name -opt foo bar') is equivalent to typing at the ipython |
|
2467 | 2472 | prompt: |
|
2468 | 2473 | |
|
2469 | 2474 | In[1]: %name -opt foo bar |
|
2470 | 2475 | |
|
2471 | 2476 | To call a magic without arguments, simply use magic('name'). |
|
2472 | 2477 | |
|
2473 | 2478 | This provides a proper Python function to call IPython's magics in any |
|
2474 | 2479 | valid Python code you can type at the interpreter, including loops and |
|
2475 | 2480 | compound statements. |
|
2476 | 2481 | """ |
|
2477 | 2482 | warnings.warn( |
|
2478 | 2483 | "`magic(...)` is deprecated since IPython 0.13 (warning added in " |
|
2479 | 2484 | "8.1), use run_line_magic(magic_name, parameter_s).", |
|
2480 | 2485 | DeprecationWarning, |
|
2481 | 2486 | stacklevel=2, |
|
2482 | 2487 | ) |
|
2483 | 2488 | # TODO: should we issue a loud deprecation warning here? |
|
2484 | 2489 | magic_name, _, magic_arg_s = arg_s.partition(' ') |
|
2485 | 2490 | magic_name = magic_name.lstrip(prefilter.ESC_MAGIC) |
|
2486 | 2491 | return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2) |
|
2487 | 2492 | |
|
2488 | 2493 | #------------------------------------------------------------------------- |
|
2489 | 2494 | # Things related to macros |
|
2490 | 2495 | #------------------------------------------------------------------------- |
|
2491 | 2496 | |
|
2492 | 2497 | def define_macro(self, name, themacro): |
|
2493 | 2498 | """Define a new macro |
|
2494 | 2499 | |
|
2495 | 2500 | Parameters |
|
2496 | 2501 | ---------- |
|
2497 | 2502 | name : str |
|
2498 | 2503 | The name of the macro. |
|
2499 | 2504 | themacro : str or Macro |
|
2500 | 2505 | The action to do upon invoking the macro. If a string, a new |
|
2501 | 2506 | Macro object is created by passing the string to it. |
|
2502 | 2507 | """ |
|
2503 | 2508 | |
|
2504 | 2509 | from IPython.core import macro |
|
2505 | 2510 | |
|
2506 | 2511 | if isinstance(themacro, str): |
|
2507 | 2512 | themacro = macro.Macro(themacro) |
|
2508 | 2513 | if not isinstance(themacro, macro.Macro): |
|
2509 | 2514 | raise ValueError('A macro must be a string or a Macro instance.') |
|
2510 | 2515 | self.user_ns[name] = themacro |
|
2511 | 2516 | |
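A quick sketch of the macro API above (editor's illustration, not part of the diff):

ip = get_ipython()
ip.define_macro("greet", "print('hello')\nprint('world')\n")
# typing `greet` at the prompt then replays those two lines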
|
2512 | 2517 | #------------------------------------------------------------------------- |
|
2513 | 2518 | # Things related to the running of system commands |
|
2514 | 2519 | #------------------------------------------------------------------------- |
|
2515 | 2520 | |
|
2516 | 2521 | def system_piped(self, cmd): |
|
2517 | 2522 | """Call the given cmd in a subprocess, piping stdout/err |
|
2518 | 2523 | |
|
2519 | 2524 | Parameters |
|
2520 | 2525 | ---------- |
|
2521 | 2526 | cmd : str |
|
2522 | 2527 | Command to execute (can not end in '&', as background processes are |
|
2523 | 2528 | not supported. Should not be a command that expects input |
|
2524 | 2529 | other than simple text. |
|
2525 | 2530 | """ |
|
2526 | 2531 | if cmd.rstrip().endswith('&'): |
|
2527 | 2532 | # this is *far* from a rigorous test |
|
2528 | 2533 | # We do not support backgrounding processes because we either use |
|
2529 | 2534 | # pexpect or pipes to read from. Users can always just call |
|
2530 | 2535 | # os.system() or use ip.system=ip.system_raw |
|
2531 | 2536 | # if they really want a background process. |
|
2532 | 2537 | raise OSError("Background processes not supported.") |
|
2533 | 2538 | |
|
2534 | 2539 | # we explicitly do NOT return the subprocess status code, because |
|
2535 | 2540 | # a non-None value would trigger :func:`sys.displayhook` calls. |
|
2536 | 2541 | # Instead, we store the exit_code in user_ns. |
|
2537 | 2542 | self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1)) |
|
2538 | 2543 | |
|
2539 | 2544 | def system_raw(self, cmd): |
|
2540 | 2545 | """Call the given cmd in a subprocess using os.system on Windows or |
|
2541 | 2546 | subprocess.call using the system shell on other platforms. |
|
2542 | 2547 | |
|
2543 | 2548 | Parameters |
|
2544 | 2549 | ---------- |
|
2545 | 2550 | cmd : str |
|
2546 | 2551 | Command to execute. |
|
2547 | 2552 | """ |
|
2548 | 2553 | cmd = self.var_expand(cmd, depth=1) |
|
2549 | 2554 | # warn if there is an IPython magic alternative. |
|
2550 | 2555 | main_cmd = cmd.split()[0] |
|
2551 | 2556 | has_magic_alternatives = ("pip", "conda", "cd") |
|
2552 | 2557 | |
|
2553 | 2558 | if main_cmd in has_magic_alternatives: |
|
2554 | 2559 | warnings.warn( |
|
2555 | 2560 | ( |
|
2556 | 2561 | "You executed the system command !{0} which may not work " |
|
2557 | 2562 | "as expected. Try the IPython magic %{0} instead." |
|
2558 | 2563 | ).format(main_cmd) |
|
2559 | 2564 | ) |
|
2560 | 2565 | |
|
2561 | 2566 | # protect os.system from UNC paths on Windows, which it can't handle: |
|
2562 | 2567 | if sys.platform == 'win32': |
|
2563 | 2568 | from IPython.utils._process_win32 import AvoidUNCPath |
|
2564 | 2569 | with AvoidUNCPath() as path: |
|
2565 | 2570 | if path is not None: |
|
2566 | 2571 | cmd = '"pushd %s &&"%s' % (path, cmd) |
|
2567 | 2572 | try: |
|
2568 | 2573 | ec = os.system(cmd) |
|
2569 | 2574 | except KeyboardInterrupt: |
|
2570 | 2575 | print('\n' + self.get_exception_only(), file=sys.stderr) |
|
2571 | 2576 | ec = -2 |
|
2572 | 2577 | else: |
|
2573 | 2578 | # For posix the result of the subprocess.call() below is an exit |
|
2574 | 2579 | # code, which by convention is zero for success, positive for |
|
2575 | 2580 | # program failure. Exit codes above 128 are reserved for signals, |
|
2576 | 2581 | # and the formula for converting a signal to an exit code is usually |
|
2577 | 2582 | # signal_number+128. To more easily differentiate between exit |
|
2578 | 2583 | # codes and signals, ipython uses negative numbers. For instance |
|
2579 | 2584 | # since control-c is signal 2 but exit code 130, ipython's |
|
2580 | 2585 | # _exit_code variable will read -2. Note that some shells like |
|
2581 | 2586 | # csh and fish don't follow sh/bash conventions for exit codes. |
|
2582 | 2587 | executable = os.environ.get('SHELL', None) |
|
2583 | 2588 | try: |
|
2584 | 2589 | # Use env shell instead of default /bin/sh |
|
2585 | 2590 | ec = subprocess.call(cmd, shell=True, executable=executable) |
|
2586 | 2591 | except KeyboardInterrupt: |
|
2587 | 2592 | # intercept control-C; a long traceback is not useful here |
|
2588 | 2593 | print('\n' + self.get_exception_only(), file=sys.stderr) |
|
2589 | 2594 | ec = 130 |
|
2590 | 2595 | if ec > 128: |
|
2591 | 2596 | ec = -(ec - 128) |
|
2592 | 2597 | |
|
2593 | 2598 | # We explicitly do NOT return the subprocess status code, because |
|
2594 | 2599 | # a non-None value would trigger :func:`sys.displayhook` calls. |
|
2595 | 2600 | # Instead, we store the exit_code in user_ns. Note the semantics |
|
2596 | 2601 | # of _exit_code: for control-c, _exit_code == -signal.SIGINT,
|
2597 | 2602 | # but raising SystemExit(_exit_code) will give status 254! |
|
2598 | 2603 | self.user_ns['_exit_code'] = ec |
|
2599 | 2604 | |
|
2600 | 2605 | # use piped system by default, because it is better behaved |
|
2601 | 2606 | system = system_piped |
|
2602 | 2607 | |
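To make the signal/exit-code convention in system_raw concrete, here is the arithmetic worked through for Ctrl-C (editor's illustration, not part of the diff):

ec = 130             # what a POSIX shell reports for SIGINT: 128 + signal number 2
if ec > 128:
    ec = -(ec - 128)
assert ec == -2      # the value that ends up in user_ns['_exit_code']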
|
2603 | 2608 | def getoutput(self, cmd, split=True, depth=0): |
|
2604 | 2609 | """Get output (possibly including stderr) from a subprocess. |
|
2605 | 2610 | |
|
2606 | 2611 | Parameters |
|
2607 | 2612 | ---------- |
|
2608 | 2613 | cmd : str |
|
2609 | 2614 | Command to execute (cannot end in '&', as background processes are

2610 | 2615 | not supported).
|
2611 | 2616 | split : bool, optional |
|
2612 | 2617 | If True, split the output into an IPython SList. Otherwise, an |
|
2613 | 2618 | IPython LSString is returned. These are objects similar to normal |
|
2614 | 2619 | lists and strings, with a few convenience attributes for easier |
|
2615 | 2620 | manipulation of line-based output. You can use '?' on them for |
|
2616 | 2621 | details. |
|
2617 | 2622 | depth : int, optional |
|
2618 | 2623 | How many frames above the caller are the local variables which should |
|
2619 | 2624 | be expanded in the command string? The default (0) assumes that the |
|
2620 | 2625 | expansion variables are in the stack frame calling this function. |
|
2621 | 2626 | """ |
|
2622 | 2627 | if cmd.rstrip().endswith('&'): |
|
2623 | 2628 | # this is *far* from a rigorous test |
|
2624 | 2629 | raise OSError("Background processes not supported.") |
|
2625 | 2630 | out = getoutput(self.var_expand(cmd, depth=depth+1)) |
|
2626 | 2631 | if split: |
|
2627 | 2632 | out = SList(out.splitlines()) |
|
2628 | 2633 | else: |
|
2629 | 2634 | out = LSString(out) |
|
2630 | 2635 | return out |
|
2631 | 2636 | |
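A short usage sketch for getoutput (not part of the diff; the command is arbitrary):

ip = get_ipython()
lines = ip.getoutput("ls")              # SList: list-like, one entry per output line
text = ip.getoutput("ls", split=False)  # LSString: a single string with convenience helpers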
|
2632 | 2637 | #------------------------------------------------------------------------- |
|
2633 | 2638 | # Things related to aliases |
|
2634 | 2639 | #------------------------------------------------------------------------- |
|
2635 | 2640 | |
|
2636 | 2641 | def init_alias(self): |
|
2637 | 2642 | self.alias_manager = AliasManager(shell=self, parent=self) |
|
2638 | 2643 | self.configurables.append(self.alias_manager) |
|
2639 | 2644 | |
|
2640 | 2645 | #------------------------------------------------------------------------- |
|
2641 | 2646 | # Things related to extensions |
|
2642 | 2647 | #------------------------------------------------------------------------- |
|
2643 | 2648 | |
|
2644 | 2649 | def init_extension_manager(self): |
|
2645 | 2650 | self.extension_manager = ExtensionManager(shell=self, parent=self) |
|
2646 | 2651 | self.configurables.append(self.extension_manager) |
|
2647 | 2652 | |
|
2648 | 2653 | #------------------------------------------------------------------------- |
|
2649 | 2654 | # Things related to payloads |
|
2650 | 2655 | #------------------------------------------------------------------------- |
|
2651 | 2656 | |
|
2652 | 2657 | def init_payload(self): |
|
2653 | 2658 | self.payload_manager = PayloadManager(parent=self) |
|
2654 | 2659 | self.configurables.append(self.payload_manager) |
|
2655 | 2660 | |
|
2656 | 2661 | #------------------------------------------------------------------------- |
|
2657 | 2662 | # Things related to the prefilter |
|
2658 | 2663 | #------------------------------------------------------------------------- |
|
2659 | 2664 | |
|
2660 | 2665 | def init_prefilter(self): |
|
2661 | 2666 | self.prefilter_manager = PrefilterManager(shell=self, parent=self) |
|
2662 | 2667 | self.configurables.append(self.prefilter_manager) |
|
2663 | 2668 | # Ultimately this will be refactored in the new interpreter code, but |
|
2664 | 2669 | # for now, we should expose the main prefilter method (there's legacy |
|
2665 | 2670 | # code out there that may rely on this). |
|
2666 | 2671 | self.prefilter = self.prefilter_manager.prefilter_lines |
|
2667 | 2672 | |
|
2668 | 2673 | def auto_rewrite_input(self, cmd): |
|
2669 | 2674 | """Print to the screen the rewritten form of the user's command. |
|
2670 | 2675 | |
|
2671 | 2676 | This shows visual feedback by rewriting input lines that cause |
|
2672 | 2677 | automatic calling to kick in, like:: |
|
2673 | 2678 | |
|
2674 | 2679 | /f x |
|
2675 | 2680 | |
|
2676 | 2681 | into:: |
|
2677 | 2682 | |
|
2678 | 2683 | ------> f(x) |
|
2679 | 2684 | |
|
2680 | 2685 | after the user's input prompt. This helps the user understand that the |
|
2681 | 2686 | input line was transformed automatically by IPython. |
|
2682 | 2687 | """ |
|
2683 | 2688 | if not self.show_rewritten_input: |
|
2684 | 2689 | return |
|
2685 | 2690 | |
|
2686 | 2691 | # This is overridden in TerminalInteractiveShell to use fancy prompts |
|
2687 | 2692 | print("------> " + cmd) |
|
2688 | 2693 | |
|
2689 | 2694 | #------------------------------------------------------------------------- |
|
2690 | 2695 | # Things related to extracting values/expressions from kernel and user_ns |
|
2691 | 2696 | #------------------------------------------------------------------------- |
|
2692 | 2697 | |
|
2693 | 2698 | def _user_obj_error(self): |
|
2694 | 2699 | """return simple exception dict |
|
2695 | 2700 | |
|
2696 | 2701 | for use in user_expressions |
|
2697 | 2702 | """ |
|
2698 | 2703 | |
|
2699 | 2704 | etype, evalue, tb = self._get_exc_info() |
|
2700 | 2705 | stb = self.InteractiveTB.get_exception_only(etype, evalue) |
|
2701 | 2706 | |
|
2702 | 2707 | exc_info = { |
|
2703 | 2708 | "status": "error", |
|
2704 | 2709 | "traceback": stb, |
|
2705 | 2710 | "ename": etype.__name__, |
|
2706 | 2711 | "evalue": py3compat.safe_unicode(evalue), |
|
2707 | 2712 | } |
|
2708 | 2713 | |
|
2709 | 2714 | return exc_info |
|
2710 | 2715 | |
|
2711 | 2716 | def _format_user_obj(self, obj): |
|
2712 | 2717 | """format a user object to display dict |
|
2713 | 2718 | |
|
2714 | 2719 | for use in user_expressions |
|
2715 | 2720 | """ |
|
2716 | 2721 | |
|
2717 | 2722 | data, md = self.display_formatter.format(obj) |
|
2718 | 2723 | value = { |
|
2719 | 2724 | 'status' : 'ok', |
|
2720 | 2725 | 'data' : data, |
|
2721 | 2726 | 'metadata' : md, |
|
2722 | 2727 | } |
|
2723 | 2728 | return value |
|
2724 | 2729 | |
|
2725 | 2730 | def user_expressions(self, expressions): |
|
2726 | 2731 | """Evaluate a dict of expressions in the user's namespace. |
|
2727 | 2732 | |
|
2728 | 2733 | Parameters |
|
2729 | 2734 | ---------- |
|
2730 | 2735 | expressions : dict |
|
2731 | 2736 | A dict with string keys and string values. The expression values |
|
2732 | 2737 | should be valid Python expressions, each of which will be evaluated |
|
2733 | 2738 | in the user namespace. |
|
2734 | 2739 | |
|
2735 | 2740 | Returns |
|
2736 | 2741 | ------- |
|
2737 | 2742 | A dict, keyed like the input expressions dict, with the rich mime-typed |
|
2738 | 2743 | display_data of each value. |
|
2739 | 2744 | """ |
|
2740 | 2745 | out = {} |
|
2741 | 2746 | user_ns = self.user_ns |
|
2742 | 2747 | global_ns = self.user_global_ns |
|
2743 | 2748 | |
|
2744 | 2749 | for key, expr in expressions.items(): |
|
2745 | 2750 | try: |
|
2746 | 2751 | value = self._format_user_obj(eval(expr, global_ns, user_ns)) |
|
2747 | 2752 | except: |
|
2748 | 2753 | value = self._user_obj_error() |
|
2749 | 2754 | out[key] = value |
|
2750 | 2755 | return out |
|
2751 | 2756 | |
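A sketch of how a frontend might use user_expressions (editor's illustration, not part of the diff):

ip = get_ipython()
ip.user_ns["x"] = 3
out = ip.user_expressions({"double": "x * 2", "boom": "1 / 0"})
# out["double"]["status"] == "ok" and carries rich display data for 6;
# out["boom"]["status"] == "error" with ename/evalue/traceback fields.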
|
2752 | 2757 | #------------------------------------------------------------------------- |
|
2753 | 2758 | # Things related to the running of code |
|
2754 | 2759 | #------------------------------------------------------------------------- |
|
2755 | 2760 | |
|
2756 | 2761 | def ex(self, cmd): |
|
2757 | 2762 | """Execute a normal python statement in user namespace.""" |
|
2758 | 2763 | with self.builtin_trap: |
|
2759 | 2764 | exec(cmd, self.user_global_ns, self.user_ns) |
|
2760 | 2765 | |
|
2761 | 2766 | def ev(self, expr): |
|
2762 | 2767 | """Evaluate python expression expr in user namespace. |
|
2763 | 2768 | |
|
2764 | 2769 | Returns the result of evaluation |
|
2765 | 2770 | """ |
|
2766 | 2771 | with self.builtin_trap: |
|
2767 | 2772 | return eval(expr, self.user_global_ns, self.user_ns) |
|
2768 | 2773 | |
|
2769 | 2774 | def safe_execfile(self, fname, *where, exit_ignore=False, raise_exceptions=False, shell_futures=False): |
|
2770 | 2775 | """A safe version of the builtin execfile(). |
|
2771 | 2776 | |
|
2772 | 2777 | This version will never throw an exception, but instead print |
|
2773 | 2778 | helpful error messages to the screen. This only works on pure |
|
2774 | 2779 | Python files with the .py extension. |
|
2775 | 2780 | |
|
2776 | 2781 | Parameters |
|
2777 | 2782 | ---------- |
|
2778 | 2783 | fname : string |
|
2779 | 2784 | The name of the file to be executed. |
|
2780 | 2785 | *where : tuple |
|
2781 | 2786 | One or two namespaces, passed to execfile() as (globals,locals). |
|
2782 | 2787 | If only one is given, it is passed as both. |
|
2783 | 2788 | exit_ignore : bool (False) |
|
2784 | 2789 | If True, then silence SystemExit for non-zero status (it is always |
|
2785 | 2790 | silenced for zero status, as it is so common). |
|
2786 | 2791 | raise_exceptions : bool (False) |
|
2787 | 2792 | If True raise exceptions everywhere. Meant for testing. |
|
2788 | 2793 | shell_futures : bool (False) |
|
2789 | 2794 | If True, the code will share future statements with the interactive |
|
2790 | 2795 | shell. It will both be affected by previous __future__ imports, and |
|
2791 | 2796 | any __future__ imports in the code will affect the shell. If False, |
|
2792 | 2797 | __future__ imports are not shared in either direction. |
|
2793 | 2798 | |
|
2794 | 2799 | """ |
|
2795 | 2800 | fname = Path(fname).expanduser().resolve() |
|
2796 | 2801 | |
|
2797 | 2802 | # Make sure we can open the file |
|
2798 | 2803 | try: |
|
2799 | 2804 | with fname.open("rb"): |
|
2800 | 2805 | pass |
|
2801 | 2806 | except: |
|
2802 | 2807 | warn('Could not open file <%s> for safe execution.' % fname) |
|
2803 | 2808 | return |
|
2804 | 2809 | |
|
2805 | 2810 | # Find things also in current directory. This is needed to mimic the |
|
2806 | 2811 | # behavior of running a script from the system command line, where |
|
2807 | 2812 | # Python inserts the script's directory into sys.path |
|
2808 | 2813 | dname = str(fname.parent) |
|
2809 | 2814 | |
|
2810 | 2815 | with prepended_to_syspath(dname), self.builtin_trap: |
|
2811 | 2816 | try: |
|
2812 | 2817 | glob, loc = (where + (None, ))[:2] |
|
2813 | 2818 | py3compat.execfile( |
|
2814 | 2819 | fname, glob, loc, |
|
2815 | 2820 | self.compile if shell_futures else None) |
|
2816 | 2821 | except SystemExit as status: |
|
2817 | 2822 | # If the call was made with 0 or None exit status (sys.exit(0) |
|
2818 | 2823 | # or sys.exit() ), don't bother showing a traceback, as both of |
|
2819 | 2824 | # these are considered normal by the OS: |
|
2820 | 2825 | # > python -c'import sys;sys.exit(0)'; echo $? |
|
2821 | 2826 | # 0 |
|
2822 | 2827 | # > python -c'import sys;sys.exit()'; echo $? |
|
2823 | 2828 | # 0 |
|
2824 | 2829 | # For other exit status, we show the exception unless |
|
2825 | 2830 | # explicitly silenced, but only in short form. |
|
2826 | 2831 | if status.code: |
|
2827 | 2832 | if raise_exceptions: |
|
2828 | 2833 | raise |
|
2829 | 2834 | if not exit_ignore: |
|
2830 | 2835 | self.showtraceback(exception_only=True) |
|
2831 | 2836 | except: |
|
2832 | 2837 | if raise_exceptions: |
|
2833 | 2838 | raise |
|
2834 | 2839 | # tb offset is 2 because we wrap execfile |
|
2835 | 2840 | self.showtraceback(tb_offset=2) |
|
2836 | 2841 | |
|
2837 | 2842 | def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False): |
|
2838 | 2843 | """Like safe_execfile, but for .ipy or .ipynb files with IPython syntax. |
|
2839 | 2844 | |
|
2840 | 2845 | Parameters |
|
2841 | 2846 | ---------- |
|
2842 | 2847 | fname : str |
|
2843 | 2848 | The name of the file to execute. The filename must have a |
|
2844 | 2849 | .ipy or .ipynb extension. |
|
2845 | 2850 | shell_futures : bool (False) |
|
2846 | 2851 | If True, the code will share future statements with the interactive |
|
2847 | 2852 | shell. It will both be affected by previous __future__ imports, and |
|
2848 | 2853 | any __future__ imports in the code will affect the shell. If False, |
|
2849 | 2854 | __future__ imports are not shared in either direction. |
|
2850 | 2855 | raise_exceptions : bool (False) |
|
2851 | 2856 | If True raise exceptions everywhere. Meant for testing. |
|
2852 | 2857 | """ |
|
2853 | 2858 | fname = Path(fname).expanduser().resolve() |
|
2854 | 2859 | |
|
2855 | 2860 | # Make sure we can open the file |
|
2856 | 2861 | try: |
|
2857 | 2862 | with fname.open("rb"): |
|
2858 | 2863 | pass |
|
2859 | 2864 | except: |
|
2860 | 2865 | warn('Could not open file <%s> for safe execution.' % fname) |
|
2861 | 2866 | return |
|
2862 | 2867 | |
|
2863 | 2868 | # Find things also in current directory. This is needed to mimic the |
|
2864 | 2869 | # behavior of running a script from the system command line, where |
|
2865 | 2870 | # Python inserts the script's directory into sys.path |
|
2866 | 2871 | dname = str(fname.parent) |
|
2867 | 2872 | |
|
2868 | 2873 | def get_cells(): |
|
2869 | 2874 | """generator for sequence of code blocks to run""" |
|
2870 | 2875 | if fname.suffix == ".ipynb": |
|
2871 | 2876 | from nbformat import read |
|
2872 | 2877 | nb = read(fname, as_version=4) |
|
2873 | 2878 | if not nb.cells: |
|
2874 | 2879 | return |
|
2875 | 2880 | for cell in nb.cells: |
|
2876 | 2881 | if cell.cell_type == 'code': |
|
2877 | 2882 | yield cell.source |
|
2878 | 2883 | else: |
|
2879 | 2884 | yield fname.read_text(encoding="utf-8") |
|
2880 | 2885 | |
|
2881 | 2886 | with prepended_to_syspath(dname): |
|
2882 | 2887 | try: |
|
2883 | 2888 | for cell in get_cells(): |
|
2884 | 2889 | result = self.run_cell(cell, silent=True, shell_futures=shell_futures) |
|
2885 | 2890 | if raise_exceptions: |
|
2886 | 2891 | result.raise_error() |
|
2887 | 2892 | elif not result.success: |
|
2888 | 2893 | break |
|
2889 | 2894 | except: |
|
2890 | 2895 | if raise_exceptions: |
|
2891 | 2896 | raise |
|
2892 | 2897 | self.showtraceback() |
|
2893 | 2898 | warn('Unknown failure executing file: <%s>' % fname) |
|
2894 | 2899 | |
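Usage sketch (not part of the diff; the notebook name is hypothetical):

ip = get_ipython()
ip.safe_execfile_ipy("analysis.ipynb", raise_exceptions=True)  # runs each code cell in order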
|
2895 | 2900 | def safe_run_module(self, mod_name, where): |
|
2896 | 2901 | """A safe version of runpy.run_module(). |
|
2897 | 2902 | |
|
2898 | 2903 | This version will never throw an exception, but instead print |
|
2899 | 2904 | helpful error messages to the screen. |
|
2900 | 2905 | |
|
2901 | 2906 | `SystemExit` exceptions with status code 0 or None are ignored. |
|
2902 | 2907 | |
|
2903 | 2908 | Parameters |
|
2904 | 2909 | ---------- |
|
2905 | 2910 | mod_name : string |
|
2906 | 2911 | The name of the module to be executed. |
|
2907 | 2912 | where : dict |
|
2908 | 2913 | The globals namespace. |
|
2909 | 2914 | """ |
|
2910 | 2915 | try: |
|
2911 | 2916 | try: |
|
2912 | 2917 | where.update( |
|
2913 | 2918 | runpy.run_module(str(mod_name), run_name="__main__", |
|
2914 | 2919 | alter_sys=True) |
|
2915 | 2920 | ) |
|
2916 | 2921 | except SystemExit as status: |
|
2917 | 2922 | if status.code: |
|
2918 | 2923 | raise |
|
2919 | 2924 | except: |
|
2920 | 2925 | self.showtraceback() |
|
2921 | 2926 | warn('Unknown failure executing module: <%s>' % mod_name) |
|
2922 | 2927 | |
|
2923 | 2928 | def run_cell( |
|
2924 | 2929 | self, |
|
2925 | 2930 | raw_cell, |
|
2926 | 2931 | store_history=False, |
|
2927 | 2932 | silent=False, |
|
2928 | 2933 | shell_futures=True, |
|
2929 | 2934 | cell_id=None, |
|
2930 | 2935 | ): |
|
2931 | 2936 | """Run a complete IPython cell. |
|
2932 | 2937 | |
|
2933 | 2938 | Parameters |
|
2934 | 2939 | ---------- |
|
2935 | 2940 | raw_cell : str |
|
2936 | 2941 | The code (including IPython code such as %magic functions) to run. |
|
2937 | 2942 | store_history : bool |
|
2938 | 2943 | If True, the raw and translated cell will be stored in IPython's |
|
2939 | 2944 | history. For user code calling back into IPython's machinery, this |
|
2940 | 2945 | should be set to False. |
|
2941 | 2946 | silent : bool |
|
2942 | 2947 | If True, avoid side-effects, such as implicit displayhooks and |
|
2943 | 2948 | and logging. silent=True forces store_history=False. |
|
2944 | 2949 | shell_futures : bool |
|
2945 | 2950 | If True, the code will share future statements with the interactive |
|
2946 | 2951 | shell. It will both be affected by previous __future__ imports, and |
|
2947 | 2952 | any __future__ imports in the code will affect the shell. If False, |
|
2948 | 2953 | __future__ imports are not shared in either direction. |
|
2949 | 2954 | |
|
2950 | 2955 | Returns |
|
2951 | 2956 | ------- |
|
2952 | 2957 | result : :class:`ExecutionResult` |
|
2953 | 2958 | """ |
|
2954 | 2959 | result = None |
|
2955 | 2960 | try: |
|
2956 | 2961 | result = self._run_cell( |
|
2957 | 2962 | raw_cell, store_history, silent, shell_futures, cell_id |
|
2958 | 2963 | ) |
|
2959 | 2964 | finally: |
|
2960 | 2965 | self.events.trigger('post_execute') |
|
2961 | 2966 | if not silent: |
|
2962 | 2967 | self.events.trigger('post_run_cell', result) |
|
2963 | 2968 | return result |
|
2964 | 2969 | |
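A minimal sketch of driving run_cell programmatically (editor's illustration, not part of the diff):

ip = get_ipython()
res = ip.run_cell("1 + 1", store_history=True)
assert res.success and res.result == 2   # ExecutionResult carries the outcome and displayed value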
|
2965 | 2970 | def _run_cell( |
|
2966 | 2971 | self, |
|
2967 | 2972 | raw_cell: str, |
|
2968 | 2973 | store_history: bool, |
|
2969 | 2974 | silent: bool, |
|
2970 | 2975 | shell_futures: bool, |
|
2971 | 2976 | cell_id: str, |
|
2972 | 2977 | ) -> ExecutionResult: |
|
2973 | 2978 | """Internal method to run a complete IPython cell.""" |
|
2974 | 2979 | |
|
2975 | 2980 | # we need to avoid calling self.transform_cell multiple times on the same thing
|
2976 | 2981 | # so we need to store some results: |
|
2977 | 2982 | preprocessing_exc_tuple = None |
|
2978 | 2983 | try: |
|
2979 | 2984 | transformed_cell = self.transform_cell(raw_cell) |
|
2980 | 2985 | except Exception: |
|
2981 | 2986 | transformed_cell = raw_cell |
|
2982 | 2987 | preprocessing_exc_tuple = sys.exc_info() |
|
2983 | 2988 | |
|
2984 | 2989 | assert transformed_cell is not None |
|
2985 | 2990 | coro = self.run_cell_async( |
|
2986 | 2991 | raw_cell, |
|
2987 | 2992 | store_history=store_history, |
|
2988 | 2993 | silent=silent, |
|
2989 | 2994 | shell_futures=shell_futures, |
|
2990 | 2995 | transformed_cell=transformed_cell, |
|
2991 | 2996 | preprocessing_exc_tuple=preprocessing_exc_tuple, |
|
2992 | 2997 | cell_id=cell_id, |
|
2993 | 2998 | ) |
|
2994 | 2999 | |
|
2995 | 3000 | # run_cell_async is async, but may not actually need an eventloop. |
|
2996 | 3001 | # when this is the case, we want to run it using the pseudo_sync_runner |
|
2997 | 3002 | # so that code can invoke eventloops (for example via the %run and

2998 | 3003 | # `%paste` magics).
|
2999 | 3004 | if self.trio_runner: |
|
3000 | 3005 | runner = self.trio_runner |
|
3001 | 3006 | elif self.should_run_async( |
|
3002 | 3007 | raw_cell, |
|
3003 | 3008 | transformed_cell=transformed_cell, |
|
3004 | 3009 | preprocessing_exc_tuple=preprocessing_exc_tuple, |
|
3005 | 3010 | ): |
|
3006 | 3011 | runner = self.loop_runner |
|
3007 | 3012 | else: |
|
3008 | 3013 | runner = _pseudo_sync_runner |
|
3009 | 3014 | |
|
3010 | 3015 | try: |
|
3011 | 3016 | return runner(coro) |
|
3012 | 3017 | except BaseException as e: |
|
3013 | 3018 | info = ExecutionInfo( |
|
3014 | 3019 | raw_cell, store_history, silent, shell_futures, cell_id |
|
3015 | 3020 | ) |
|
3016 | 3021 | result = ExecutionResult(info) |
|
3017 | 3022 | result.error_in_exec = e |
|
3018 | 3023 | self.showtraceback(running_compiled_code=True) |
|
3019 | 3024 | return result |
|
3020 | 3025 | |
|
3021 | 3026 | def should_run_async( |
|
3022 | 3027 | self, raw_cell: str, *, transformed_cell=None, preprocessing_exc_tuple=None |
|
3023 | 3028 | ) -> bool: |
|
3024 | 3029 | """Return whether a cell should be run asynchronously via a coroutine runner |
|
3025 | 3030 | |
|
3026 | 3031 | Parameters |
|
3027 | 3032 | ---------- |
|
3028 | 3033 | raw_cell : str |
|
3029 | 3034 | The code to be executed |
|
3030 | 3035 | |
|
3031 | 3036 | Returns |
|
3032 | 3037 | ------- |
|
3033 | 3038 | result: bool |
|
3034 | 3039 | Whether the code needs to be run with a coroutine runner or not |
|
3035 | 3040 | .. versionadded:: 7.0 |
|
3036 | 3041 | """ |
|
3037 | 3042 | if not self.autoawait: |
|
3038 | 3043 | return False |
|
3039 | 3044 | if preprocessing_exc_tuple is not None: |
|
3040 | 3045 | return False |
|
3041 | 3046 | assert preprocessing_exc_tuple is None |
|
3042 | 3047 | if transformed_cell is None: |
|
3043 | 3048 | warnings.warn( |
|
3044 | 3049 | "`should_run_async` will not call `transform_cell`" |
|
3045 | 3050 | " automatically in the future. Please pass the result to" |
|
3046 | 3051 | " `transformed_cell` argument and any exceptions that happen"

3047 | 3052 | " during the"

3048 | 3053 | " transform in `preprocessing_exc_tuple` in"
|
3049 | 3054 | " IPython 7.17 and above.", |
|
3050 | 3055 | DeprecationWarning, |
|
3051 | 3056 | stacklevel=2, |
|
3052 | 3057 | ) |
|
3053 | 3058 | try: |
|
3054 | 3059 | cell = self.transform_cell(raw_cell) |
|
3055 | 3060 | except Exception: |
|
3056 | 3061 | # any exception during transform will be raised |
|
3057 | 3062 | # prior to execution |
|
3058 | 3063 | return False |
|
3059 | 3064 | else: |
|
3060 | 3065 | cell = transformed_cell |
|
3061 | 3066 | return _should_be_async(cell) |
|
3062 | 3067 | |
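For illustration (not part of the diff): with autoawait enabled, a cell containing top-level await is the kind of input this method reports as needing a coroutine runner.

ip = get_ipython()
cell = "import asyncio\nawait asyncio.sleep(0)\n"
ip.should_run_async(cell, transformed_cell=ip.transform_cell(cell))  # -> True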
|
3063 | 3068 | async def run_cell_async( |
|
3064 | 3069 | self, |
|
3065 | 3070 | raw_cell: str, |
|
3066 | 3071 | store_history=False, |
|
3067 | 3072 | silent=False, |
|
3068 | 3073 | shell_futures=True, |
|
3069 | 3074 | *, |
|
3070 | 3075 | transformed_cell: Optional[str] = None, |
|
3071 | 3076 | preprocessing_exc_tuple: Optional[Any] = None, |
|
3072 | 3077 | cell_id=None, |
|
3073 | 3078 | ) -> ExecutionResult: |
|
3074 | 3079 | """Run a complete IPython cell asynchronously. |
|
3075 | 3080 | |
|
3076 | 3081 | Parameters |
|
3077 | 3082 | ---------- |
|
3078 | 3083 | raw_cell : str |
|
3079 | 3084 | The code (including IPython code such as %magic functions) to run. |
|
3080 | 3085 | store_history : bool |
|
3081 | 3086 | If True, the raw and translated cell will be stored in IPython's |
|
3082 | 3087 | history. For user code calling back into IPython's machinery, this |
|
3083 | 3088 | should be set to False. |
|
3084 | 3089 | silent : bool |
|
3085 | 3090 | If True, avoid side-effects, such as implicit displayhooks

3086 | 3091 | and logging. silent=True forces store_history=False.
|
3087 | 3092 | shell_futures : bool |
|
3088 | 3093 | If True, the code will share future statements with the interactive |
|
3089 | 3094 | shell. It will both be affected by previous __future__ imports, and |
|
3090 | 3095 | any __future__ imports in the code will affect the shell. If False, |
|
3091 | 3096 | __future__ imports are not shared in either direction. |
|
3092 | 3097 | transformed_cell: str |
|
3093 | 3098 | cell that was passed through transformers |
|
3094 | 3099 | preprocessing_exc_tuple: |
|
3095 | 3100 | trace if the transformation failed. |
|
3096 | 3101 | |
|
3097 | 3102 | Returns |
|
3098 | 3103 | ------- |
|
3099 | 3104 | result : :class:`ExecutionResult` |
|
3100 | 3105 | |
|
3101 | 3106 | .. versionadded:: 7.0 |
|
3102 | 3107 | """ |
|
3103 | 3108 | info = ExecutionInfo(raw_cell, store_history, silent, shell_futures, cell_id) |
|
3104 | 3109 | result = ExecutionResult(info) |
|
3105 | 3110 | |
|
3106 | 3111 | if (not raw_cell) or raw_cell.isspace(): |
|
3107 | 3112 | self.last_execution_succeeded = True |
|
3108 | 3113 | self.last_execution_result = result |
|
3109 | 3114 | return result |
|
3110 | 3115 | |
|
3111 | 3116 | if silent: |
|
3112 | 3117 | store_history = False |
|
3113 | 3118 | |
|
3114 | 3119 | if store_history: |
|
3115 | 3120 | result.execution_count = self.execution_count |
|
3116 | 3121 | |
|
3117 | 3122 | def error_before_exec(value): |
|
3118 | 3123 | if store_history: |
|
3119 | 3124 | self.execution_count += 1 |
|
3120 | 3125 | result.error_before_exec = value |
|
3121 | 3126 | self.last_execution_succeeded = False |
|
3122 | 3127 | self.last_execution_result = result |
|
3123 | 3128 | return result |
|
3124 | 3129 | |
|
3125 | 3130 | self.events.trigger('pre_execute') |
|
3126 | 3131 | if not silent: |
|
3127 | 3132 | self.events.trigger('pre_run_cell', info) |
|
3128 | 3133 | |
|
3129 | 3134 | if transformed_cell is None: |
|
3130 | 3135 | warnings.warn( |
|
3131 | 3136 | "`run_cell_async` will not call `transform_cell`" |
|
3132 | 3137 | " automatically in the future. Please pass the result to" |
|
3133 | 3138 | " `transformed_cell` argument and any exceptions that happen"

3134 | 3139 | " during the"

3135 | 3140 | " transform in `preprocessing_exc_tuple` in"
|
3136 | 3141 | " IPython 7.17 and above.", |
|
3137 | 3142 | DeprecationWarning, |
|
3138 | 3143 | stacklevel=2, |
|
3139 | 3144 | ) |
|
3140 | 3145 | # If any of our input transformation (input_transformer_manager or |
|
3141 | 3146 | # prefilter_manager) raises an exception, we store it in this variable |
|
3142 | 3147 | # so that we can display the error after logging the input and storing |
|
3143 | 3148 | # it in the history. |
|
3144 | 3149 | try: |
|
3145 | 3150 | cell = self.transform_cell(raw_cell) |
|
3146 | 3151 | except Exception: |
|
3147 | 3152 | preprocessing_exc_tuple = sys.exc_info() |
|
3148 | 3153 | cell = raw_cell # cell has to exist so it can be stored/logged |
|
3149 | 3154 | else: |
|
3150 | 3155 | preprocessing_exc_tuple = None |
|
3151 | 3156 | else: |
|
3152 | 3157 | if preprocessing_exc_tuple is None: |
|
3153 | 3158 | cell = transformed_cell |
|
3154 | 3159 | else: |
|
3155 | 3160 | cell = raw_cell |
|
3156 | 3161 | |
|
3162 | # Do NOT store paste/cpaste magic history | |
|
3163 | if "get_ipython().run_line_magic(" in cell and "paste" in cell: | |
|
3164 | store_history = False | |
|
3165 | ||
|
3157 | 3166 | # Store raw and processed history |
|
3158 | if store_history and raw_cell.strip(" %") != "paste": | |
|
3167 | if store_history: | |
|
3159 | 3168 | self.history_manager.store_inputs(self.execution_count, cell, raw_cell) |
|
3160 | 3169 | if not silent: |
|
3161 | 3170 | self.logger.log(cell, raw_cell) |
|
3162 | 3171 | |
|
3163 | 3172 | # Display the exception if input processing failed. |
|
3164 | 3173 | if preprocessing_exc_tuple is not None: |
|
3165 | 3174 | self.showtraceback(preprocessing_exc_tuple) |
|
3166 | 3175 | if store_history: |
|
3167 | 3176 | self.execution_count += 1 |
|
3168 | 3177 | return error_before_exec(preprocessing_exc_tuple[1]) |
|
3169 | 3178 | |
|
3170 | 3179 | # Our own compiler remembers the __future__ environment. If we want to |
|
3171 | 3180 | # run code with a separate __future__ environment, use the default |
|
3172 | 3181 | # compiler |
|
3173 | 3182 | compiler = self.compile if shell_futures else self.compiler_class() |
|
3174 | 3183 | |
|
3175 | 3184 | _run_async = False |
|
3176 | 3185 | |
|
3177 | 3186 | with self.builtin_trap: |
|
3178 | 3187 | cell_name = compiler.cache(cell, self.execution_count, raw_code=raw_cell) |
|
3179 | 3188 | |
|
3180 | 3189 | with self.display_trap: |
|
3181 | 3190 | # Compile to bytecode |
|
3182 | 3191 | try: |
|
3183 | 3192 | code_ast = compiler.ast_parse(cell, filename=cell_name) |
|
3184 | 3193 | except self.custom_exceptions as e: |
|
3185 | 3194 | etype, value, tb = sys.exc_info() |
|
3186 | 3195 | self.CustomTB(etype, value, tb) |
|
3187 | 3196 | return error_before_exec(e) |
|
3188 | 3197 | except IndentationError as e: |
|
3189 | 3198 | self.showindentationerror() |
|
3190 | 3199 | return error_before_exec(e) |
|
3191 | 3200 | except (OverflowError, SyntaxError, ValueError, TypeError, |
|
3192 | 3201 | MemoryError) as e: |
|
3193 | 3202 | self.showsyntaxerror() |
|
3194 | 3203 | return error_before_exec(e) |
|
3195 | 3204 | |
|
3196 | 3205 | # Apply AST transformations |
|
3197 | 3206 | try: |
|
3198 | 3207 | code_ast = self.transform_ast(code_ast) |
|
3199 | 3208 | except InputRejected as e: |
|
3200 | 3209 | self.showtraceback() |
|
3201 | 3210 | return error_before_exec(e) |
|
3202 | 3211 | |
|
3203 | 3212 | # Give the displayhook a reference to our ExecutionResult so it |
|
3204 | 3213 | # can fill in the output value. |
|
3205 | 3214 | self.displayhook.exec_result = result |
|
3206 | 3215 | |
|
3207 | 3216 | # Execute the user code |
|
3208 | 3217 | interactivity = "none" if silent else self.ast_node_interactivity |
|
3209 | 3218 | |
|
3210 | 3219 | |
|
3211 | 3220 | has_raised = await self.run_ast_nodes(code_ast.body, cell_name, |
|
3212 | 3221 | interactivity=interactivity, compiler=compiler, result=result) |
|
3213 | 3222 | |
|
3214 | 3223 | self.last_execution_succeeded = not has_raised |
|
3215 | 3224 | self.last_execution_result = result |
|
3216 | 3225 | |
|
3217 | 3226 | # Reset this so later displayed values do not modify the |
|
3218 | 3227 | # ExecutionResult |
|
3219 | 3228 | self.displayhook.exec_result = None |
|
3220 | 3229 | |
|
3221 | 3230 | if store_history: |
|
3222 | 3231 | # Write output to the database. Does nothing unless |
|
3223 | 3232 | # history output logging is enabled. |
|
3224 | 3233 | self.history_manager.store_output(self.execution_count) |
|
3225 | 3234 | # Each cell is a *single* input, regardless of how many lines it has |
|
3226 | 3235 | self.execution_count += 1 |
|
3227 | 3236 | |
|
3228 | 3237 | return result |
|
3229 | 3238 | |
|
3230 | 3239 | def transform_cell(self, raw_cell): |
|
3231 | 3240 | """Transform an input cell before parsing it. |
|
3232 | 3241 | |
|
3233 | 3242 | Static transformations, implemented in IPython.core.inputtransformer2, |
|
3234 | 3243 | deal with things like ``%magic`` and ``!system`` commands. |
|
3235 | 3244 | These run on all input. |
|
3236 | 3245 | Dynamic transformations, for things like unescaped magics and the exit |
|
3237 | 3246 | autocall, depend on the state of the interpreter. |
|
3238 | 3247 | These only apply to single line inputs. |
|
3239 | 3248 | |
|
3240 | 3249 | These string-based transformations are followed by AST transformations; |
|
3241 | 3250 | see :meth:`transform_ast`. |
|
3242 | 3251 | """ |
|
3243 | 3252 | # Static input transformations |
|
3244 | 3253 | cell = self.input_transformer_manager.transform_cell(raw_cell) |
|
3245 | 3254 | |
|
3246 | 3255 | if len(cell.splitlines()) == 1: |
|
3247 | 3256 | # Dynamic transformations - only applied for single line commands |
|
3248 | 3257 | with self.builtin_trap: |
|
3249 | 3258 | # use prefilter_lines to handle trailing newlines |
|
3250 | 3259 | # restore trailing newline for ast.parse |
|
3251 | 3260 | cell = self.prefilter_manager.prefilter_lines(cell) + '\n' |
|
3252 | 3261 | |
|
3253 | 3262 | lines = cell.splitlines(keepends=True) |
|
3254 | 3263 | for transform in self.input_transformers_post: |
|
3255 | 3264 | lines = transform(lines) |
|
3256 | 3265 | cell = ''.join(lines) |
|
3257 | 3266 | |
|
3258 | 3267 | return cell |
|
3259 | 3268 | |
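What the static transformations produce, sketched for two common inputs (editor's illustration, not part of the diff):

ip = get_ipython()
ip.transform_cell("%ls -l\n")    # -> "get_ipython().run_line_magic('ls', '-l')\n"
ip.transform_cell("!echo hi\n")  # -> "get_ipython().system('echo hi')\n"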
|
3260 | 3269 | def transform_ast(self, node): |
|
3261 | 3270 | """Apply the AST transformations from self.ast_transformers |
|
3262 | 3271 | |
|
3263 | 3272 | Parameters |
|
3264 | 3273 | ---------- |
|
3265 | 3274 | node : ast.Node |
|
3266 | 3275 | The root node to be transformed. Typically called with the ast.Module |
|
3267 | 3276 | produced by parsing user input. |
|
3268 | 3277 | |
|
3269 | 3278 | Returns |
|
3270 | 3279 | ------- |
|
3271 | 3280 | An ast.Node corresponding to the node it was called with. Note that it |
|
3272 | 3281 | may also modify the passed object, so don't rely on references to the |
|
3273 | 3282 | original AST. |
|
3274 | 3283 | """ |
|
3275 | 3284 | for transformer in self.ast_transformers: |
|
3276 | 3285 | try: |
|
3277 | 3286 | node = transformer.visit(node) |
|
3278 | 3287 | except InputRejected: |
|
3279 | 3288 | # User-supplied AST transformers can reject an input by raising |
|
3280 | 3289 | # an InputRejected. Short-circuit in this case so that we |
|
3281 | 3290 | # don't unregister the transform. |
|
3282 | 3291 | raise |
|
3283 | 3292 | except Exception: |
|
3284 | 3293 | warn("AST transformer %r threw an error. It will be unregistered." % transformer) |
|
3285 | 3294 | self.ast_transformers.remove(transformer) |
|
3286 | 3295 | |
|
3287 | 3296 | if self.ast_transformers: |
|
3288 | 3297 | ast.fix_missing_locations(node) |
|
3289 | 3298 | return node |
|
3290 | 3299 | |
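A toy AST transformer of the kind this loop applies, shown as a sketch (not part of the diff; the class is made up):

import ast

class NegateIntegers(ast.NodeTransformer):
    """Flip the sign of every integer literal in the cell."""
    def visit_Constant(self, node):
        if isinstance(node.value, int) and not isinstance(node.value, bool):
            return ast.copy_location(ast.Constant(-node.value), node)
        return node

get_ipython().ast_transformers.append(NegateIntegers())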
|
3291 | 3300 | async def run_ast_nodes( |
|
3292 | 3301 | self, |
|
3293 | 3302 | nodelist: ListType[stmt], |
|
3294 | 3303 | cell_name: str, |
|
3295 | 3304 | interactivity="last_expr", |
|
3296 | 3305 | compiler=compile, |
|
3297 | 3306 | result=None, |
|
3298 | 3307 | ): |
|
3299 | 3308 | """Run a sequence of AST nodes. The execution mode depends on the |
|
3300 | 3309 | interactivity parameter. |
|
3301 | 3310 | |
|
3302 | 3311 | Parameters |
|
3303 | 3312 | ---------- |
|
3304 | 3313 | nodelist : list |
|
3305 | 3314 | A sequence of AST nodes to run. |
|
3306 | 3315 | cell_name : str |
|
3307 | 3316 | Will be passed to the compiler as the filename of the cell. Typically |
|
3308 | 3317 | the value returned by ip.compile.cache(cell). |
|
3309 | 3318 | interactivity : str |
|
3310 | 3319 | 'all', 'last', 'last_expr' , 'last_expr_or_assign' or 'none', |
|
3311 | 3320 | specifying which nodes should be run interactively (displaying output |
|
3312 | 3321 | from expressions). 'last_expr' will run the last node interactively |
|
3313 | 3322 | only if it is an expression (i.e. expressions in loops or other blocks |
|
3314 | 3323 | are not displayed). 'last_expr_or_assign' will run the last expression |
|
3315 | 3324 | or the last assignment. Other values for this parameter will raise a |
|
3316 | 3325 | ValueError. |
|
3317 | 3326 | |
|
3318 | 3327 | compiler : callable |
|
3319 | 3328 | A function with the same interface as the built-in compile(), to turn |
|
3320 | 3329 | the AST nodes into code objects. Default is the built-in compile(). |
|
3321 | 3330 | result : ExecutionResult, optional |
|
3322 | 3331 | An object to store exceptions that occur during execution. |
|
3323 | 3332 | |
|
3324 | 3333 | Returns |
|
3325 | 3334 | ------- |
|
3326 | 3335 | True if an exception occurred while running code, False if it finished |
|
3327 | 3336 | running. |
|
3328 | 3337 | """ |
|
3329 | 3338 | if not nodelist: |
|
3330 | 3339 | return |
|
3331 | 3340 | |
|
3332 | 3341 | |
|
3333 | 3342 | if interactivity == 'last_expr_or_assign': |
|
3334 | 3343 | if isinstance(nodelist[-1], _assign_nodes): |
|
3335 | 3344 | asg = nodelist[-1] |
|
3336 | 3345 | if isinstance(asg, ast.Assign) and len(asg.targets) == 1: |
|
3337 | 3346 | target = asg.targets[0] |
|
3338 | 3347 | elif isinstance(asg, _single_targets_nodes): |
|
3339 | 3348 | target = asg.target |
|
3340 | 3349 | else: |
|
3341 | 3350 | target = None |
|
3342 | 3351 | if isinstance(target, ast.Name): |
|
3343 | 3352 | nnode = ast.Expr(ast.Name(target.id, ast.Load())) |
|
3344 | 3353 | ast.fix_missing_locations(nnode) |
|
3345 | 3354 | nodelist.append(nnode) |
|
3346 | 3355 | interactivity = 'last_expr' |
|
3347 | 3356 | |
|
3348 | 3357 | _async = False |
|
3349 | 3358 | if interactivity == 'last_expr': |
|
3350 | 3359 | if isinstance(nodelist[-1], ast.Expr): |
|
3351 | 3360 | interactivity = "last" |
|
3352 | 3361 | else: |
|
3353 | 3362 | interactivity = "none" |
|
3354 | 3363 | |
|
3355 | 3364 | if interactivity == 'none': |
|
3356 | 3365 | to_run_exec, to_run_interactive = nodelist, [] |
|
3357 | 3366 | elif interactivity == 'last': |
|
3358 | 3367 | to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:] |
|
3359 | 3368 | elif interactivity == 'all': |
|
3360 | 3369 | to_run_exec, to_run_interactive = [], nodelist |
|
3361 | 3370 | else: |
|
3362 | 3371 | raise ValueError("Interactivity was %r" % interactivity) |
|
3363 | 3372 | |
|
3364 | 3373 | try: |
|
3365 | 3374 | |
|
3366 | 3375 | def compare(code): |
|
3367 | 3376 | is_async = inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE |
|
3368 | 3377 | return is_async |
|
3369 | 3378 | |
|
3370 | 3379 | # refactor that to just change the mod constructor. |
|
3371 | 3380 | to_run = [] |
|
3372 | 3381 | for node in to_run_exec: |
|
3373 | 3382 | to_run.append((node, "exec")) |
|
3374 | 3383 | |
|
3375 | 3384 | for node in to_run_interactive: |
|
3376 | 3385 | to_run.append((node, "single")) |
|
3377 | 3386 | |
|
3378 | 3387 | for node, mode in to_run: |
|
3379 | 3388 | if mode == "exec": |
|
3380 | 3389 | mod = Module([node], []) |
|
3381 | 3390 | elif mode == "single": |
|
3382 | 3391 | mod = ast.Interactive([node]) |
|
3383 | 3392 | with compiler.extra_flags( |
|
3384 | 3393 | getattr(ast, "PyCF_ALLOW_TOP_LEVEL_AWAIT", 0x0) |
|
3385 | 3394 | if self.autoawait |
|
3386 | 3395 | else 0x0 |
|
3387 | 3396 | ): |
|
3388 | 3397 | code = compiler(mod, cell_name, mode) |
|
3389 | 3398 | asy = compare(code) |
|
3390 | 3399 | if await self.run_code(code, result, async_=asy): |
|
3391 | 3400 | return True |
|
3392 | 3401 | |
|
3393 | 3402 | # Flush softspace |
|
3394 | 3403 | if softspace(sys.stdout, 0): |
|
3395 | 3404 | print() |
|
3396 | 3405 | |
|
3397 | 3406 | except: |
|
3398 | 3407 | # It's possible to have exceptions raised here, typically by |
|
3399 | 3408 | # compilation of odd code (such as a naked 'return' outside a |
|
3400 | 3409 | # function) that did parse but isn't valid. Typically the exception |
|
3401 | 3410 | # is a SyntaxError, but it's safest just to catch anything and show |
|
3402 | 3411 | # the user a traceback. |
|
3403 | 3412 | |
|
3404 | 3413 | # We do only one try/except outside the loop to minimize the impact |
|
3405 | 3414 | # on runtime, and also because if any node in the node list is |
|
3406 | 3415 | # broken, we should stop execution completely. |
|
3407 | 3416 | if result: |
|
3408 | 3417 | result.error_before_exec = sys.exc_info()[1] |
|
3409 | 3418 | self.showtraceback() |
|
3410 | 3419 | return True |
|
3411 | 3420 | |
|
3412 | 3421 | return False |
|
3413 | 3422 | |
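The ``interactivity`` argument above decides which parsed statements are compiled in ``"exec"`` mode (run silently) and which in ``"single"`` mode (results displayed). A minimal standalone sketch of that partitioning, outside IPython; the ``split_nodes`` helper is hypothetical and not part of the API::

    import ast

    def split_nodes(cell: str, interactivity: str = "last_expr"):
        # Mirror of the branching above: 'last_expr' degrades to 'last' or
        # 'none' depending on whether the final statement is an expression.
        nodelist = ast.parse(cell).body
        if interactivity == "last_expr":
            interactivity = "last" if isinstance(nodelist[-1], ast.Expr) else "none"
        if interactivity == "none":
            return nodelist, []                 # everything runs silently
        if interactivity == "last":
            return nodelist[:-1], nodelist[-1:]
        if interactivity == "all":
            return [], nodelist                 # every statement is displayed
        raise ValueError("Interactivity was %r" % interactivity)

    to_exec, to_display = split_nodes("x = 1\nx + 1")
    # -> the assignment is compiled with mode "exec", the trailing expression
    #    with mode "single", so only `x + 1` would produce an Out[] entry.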
|
3414 | 3423 | async def run_code(self, code_obj, result=None, *, async_=False): |
|
3415 | 3424 | """Execute a code object. |
|
3416 | 3425 | |
|
3417 | 3426 | When an exception occurs, self.showtraceback() is called to display a |
|
3418 | 3427 | traceback. |
|
3419 | 3428 | |
|
3420 | 3429 | Parameters |
|
3421 | 3430 | ---------- |
|
3422 | 3431 | code_obj : code object |
|
3423 | 3432 | A compiled code object, to be executed |
|
3424 | 3433 | result : ExecutionResult, optional |
|
3425 | 3434 | An object to store exceptions that occur during execution. |
|
3426 | 3435 | async_ : Bool (Experimental) |
|
3427 | 3436 | Attempt to run top-level asynchronous code in a default loop. |
|
3428 | 3437 | |
|
3429 | 3438 | Returns |
|
3430 | 3439 | ------- |
|
3431 | 3440 | False : successful execution. |
|
3432 | 3441 | True : an error occurred. |
|
3433 | 3442 | """ |
|
3434 | 3443 | # special value to say that anything above is IPython and should be |
|
3435 | 3444 | # hidden. |
|
3436 | 3445 | __tracebackhide__ = "__ipython_bottom__" |
|
3437 | 3446 | # Set our own excepthook in case the user code tries to call it |
|
3438 | 3447 | # directly, so that the IPython crash handler doesn't get triggered |
|
3439 | 3448 | old_excepthook, sys.excepthook = sys.excepthook, self.excepthook |
|
3440 | 3449 | |
|
3441 | 3450 | # we save the original sys.excepthook in the instance, in case config |
|
3442 | 3451 | # code (such as magics) needs access to it. |
|
3443 | 3452 | self.sys_excepthook = old_excepthook |
|
3444 | 3453 | outflag = True # happens in more places, so it's easier as default |
|
3445 | 3454 | try: |
|
3446 | 3455 | try: |
|
3447 | 3456 | if async_: |
|
3448 | 3457 | await eval(code_obj, self.user_global_ns, self.user_ns) |
|
3449 | 3458 | else: |
|
3450 | 3459 | exec(code_obj, self.user_global_ns, self.user_ns) |
|
3451 | 3460 | finally: |
|
3452 | 3461 | # Reset our crash handler in place |
|
3453 | 3462 | sys.excepthook = old_excepthook |
|
3454 | 3463 | except SystemExit as e: |
|
3455 | 3464 | if result is not None: |
|
3456 | 3465 | result.error_in_exec = e |
|
3457 | 3466 | self.showtraceback(exception_only=True) |
|
3458 | 3467 | warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1) |
|
3459 | 3468 | except bdb.BdbQuit: |
|
3460 | 3469 | etype, value, tb = sys.exc_info() |
|
3461 | 3470 | if result is not None: |
|
3462 | 3471 | result.error_in_exec = value |
|
3463 | 3472 | # the BdbQuit stops here |
|
3464 | 3473 | except self.custom_exceptions: |
|
3465 | 3474 | etype, value, tb = sys.exc_info() |
|
3466 | 3475 | if result is not None: |
|
3467 | 3476 | result.error_in_exec = value |
|
3468 | 3477 | self.CustomTB(etype, value, tb) |
|
3469 | 3478 | except: |
|
3470 | 3479 | if result is not None: |
|
3471 | 3480 | result.error_in_exec = sys.exc_info()[1] |
|
3472 | 3481 | self.showtraceback(running_compiled_code=True) |
|
3473 | 3482 | else: |
|
3474 | 3483 | outflag = False |
|
3475 | 3484 | return outflag |
|
3476 | 3485 | |
|
3477 | 3486 | # For backwards compatibility |
|
3478 | 3487 | runcode = run_code |
|
3479 | 3488 | |
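A self-contained sketch (assuming Python >= 3.8) of the top-level-await machinery used above: compiling with ``PyCF_ALLOW_TOP_LEVEL_AWAIT`` makes ``await`` legal at module level and sets ``CO_COROUTINE`` on the code object (which is what ``compare()`` detects), and ``eval`` of such a code object returns a coroutine that ``run_code`` then awaits::

    import ast
    import asyncio
    import inspect

    src = "await asyncio.sleep(0)"
    code = compile(src, "<cell>", "exec", flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT)
    assert code.co_flags & inspect.CO_COROUTINE      # what compare() checks

    coro = eval(code, {"asyncio": asyncio})          # a coroutine, not None
    asyncio.run(coro)                                # IPython awaits it instead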
|
3480 | 3489 | def check_complete(self, code: str) -> Tuple[str, str]: |
|
3481 | 3490 | """Return whether a block of code is ready to execute, or should be continued |
|
3482 | 3491 | |
|
3483 | 3492 | Parameters |
|
3484 | 3493 | ---------- |
|
3485 | 3494 | code : string |
|
3486 | 3495 | Python input code, which can be multiline. |
|
3487 | 3496 | |
|
3488 | 3497 | Returns |
|
3489 | 3498 | ------- |
|
3490 | 3499 | status : str |
|
3491 | 3500 | One of 'complete', 'incomplete', or 'invalid' if source is not a |
|
3492 | 3501 | prefix of valid code. |
|
3493 | 3502 | indent : str |
|
3494 | 3503 | When status is 'incomplete', this is some whitespace to insert on |
|
3495 | 3504 | the next line of the prompt. |
|
3496 | 3505 | """ |
|
3497 | 3506 | status, nspaces = self.input_transformer_manager.check_complete(code) |
|
3498 | 3507 | return status, ' ' * (nspaces or 0) |
|
3499 | 3508 | |
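A hedged usage sketch of ``check_complete``; the exact amount of indentation returned depends on the input transformer configuration, so the comments below are typical values rather than guarantees::

    from IPython.core.interactiveshell import InteractiveShell

    ip = InteractiveShell.instance()
    print(ip.check_complete("1 + 1"))               # ('complete', '')
    print(ip.check_complete("for i in range(3):"))  # ('incomplete', '    ') -- usually 4 spaces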
|
3500 | 3509 | #------------------------------------------------------------------------- |
|
3501 | 3510 | # Things related to GUI support and pylab |
|
3502 | 3511 | #------------------------------------------------------------------------- |
|
3503 | 3512 | |
|
3504 | 3513 | active_eventloop = None |
|
3505 | 3514 | |
|
3506 | 3515 | def enable_gui(self, gui=None): |
|
3507 | 3516 | raise NotImplementedError('Implement enable_gui in a subclass') |
|
3508 | 3517 | |
|
3509 | 3518 | def enable_matplotlib(self, gui=None): |
|
3510 | 3519 | """Enable interactive matplotlib and inline figure support. |
|
3511 | 3520 | |
|
3512 | 3521 | This takes the following steps: |
|
3513 | 3522 | |
|
3514 | 3523 | 1. select the appropriate eventloop and matplotlib backend |
|
3515 | 3524 | 2. set up matplotlib for interactive use with that backend |
|
3516 | 3525 | 3. configure formatters for inline figure display |
|
3517 | 3526 | 4. enable the selected gui eventloop |
|
3518 | 3527 | |
|
3519 | 3528 | Parameters |
|
3520 | 3529 | ---------- |
|
3521 | 3530 | gui : optional, string |
|
3522 | 3531 | If given, dictates the choice of matplotlib GUI backend to use |
|
3523 | 3532 | (should be one of IPython's supported backends, 'qt', 'osx', 'tk', |
|
3524 | 3533 | 'gtk', 'wx' or 'inline'), otherwise we use the default chosen by |
|
3525 | 3534 | matplotlib (as dictated by the matplotlib build-time options plus the |
|
3526 | 3535 | user's matplotlibrc configuration file). Note that not all backends |
|
3527 | 3536 | make sense in all contexts, for example a terminal ipython can't |
|
3528 | 3537 | display figures inline. |
|
3529 | 3538 | """ |
|
3530 | 3539 | from matplotlib_inline.backend_inline import configure_inline_support |
|
3531 | 3540 | |
|
3532 | 3541 | from IPython.core import pylabtools as pt |
|
3533 | 3542 | gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select) |
|
3534 | 3543 | |
|
3535 | 3544 | if gui != 'inline': |
|
3536 | 3545 | # If we have our first gui selection, store it |
|
3537 | 3546 | if self.pylab_gui_select is None: |
|
3538 | 3547 | self.pylab_gui_select = gui |
|
3539 | 3548 | # Otherwise if they are different |
|
3540 | 3549 | elif gui != self.pylab_gui_select: |
|
3541 | 3550 | print('Warning: Cannot change to a different GUI toolkit: %s.' |
|
3542 | 3551 | ' Using %s instead.' % (gui, self.pylab_gui_select)) |
|
3543 | 3552 | gui, backend = pt.find_gui_and_backend(self.pylab_gui_select) |
|
3544 | 3553 | |
|
3545 | 3554 | pt.activate_matplotlib(backend) |
|
3546 | 3555 | configure_inline_support(self, backend) |
|
3547 | 3556 | |
|
3548 | 3557 | # Now we must activate the gui pylab wants to use, and fix %run to take |
|
3549 | 3558 | # plot updates into account |
|
3550 | 3559 | self.enable_gui(gui) |
|
3551 | 3560 | self.magics_manager.registry['ExecutionMagics'].default_runner = \ |
|
3552 | 3561 | pt.mpl_runner(self.safe_execfile) |
|
3553 | 3562 | |
|
3554 | 3563 | return gui, backend |
|
3555 | 3564 | |
|
3556 | 3565 | def enable_pylab(self, gui=None, import_all=True, welcome_message=False): |
|
3557 | 3566 | """Activate pylab support at runtime. |
|
3558 | 3567 | |
|
3559 | 3568 | This turns on support for matplotlib, preloads into the interactive |
|
3560 | 3569 | namespace all of numpy and pylab, and configures IPython to correctly |
|
3561 | 3570 | interact with the GUI event loop. The GUI backend to be used can be |
|
3562 | 3571 | optionally selected with the optional ``gui`` argument. |
|
3563 | 3572 | |
|
3564 | 3573 | This method only adds preloading the namespace to InteractiveShell.enable_matplotlib. |
|
3565 | 3574 | |
|
3566 | 3575 | Parameters |
|
3567 | 3576 | ---------- |
|
3568 | 3577 | gui : optional, string |
|
3569 | 3578 | If given, dictates the choice of matplotlib GUI backend to use |
|
3570 | 3579 | (should be one of IPython's supported backends, 'qt', 'osx', 'tk', |
|
3571 | 3580 | 'gtk', 'wx' or 'inline'), otherwise we use the default chosen by |
|
3572 | 3581 | matplotlib (as dictated by the matplotlib build-time options plus the |
|
3573 | 3582 | user's matplotlibrc configuration file). Note that not all backends |
|
3574 | 3583 | make sense in all contexts, for example a terminal ipython can't |
|
3575 | 3584 | display figures inline. |
|
3576 | 3585 | import_all : optional, bool, default: True |
|
3577 | 3586 | Whether to do `from numpy import *` and `from pylab import *` |
|
3578 | 3587 | in addition to module imports. |
|
3579 | 3588 | welcome_message : deprecated |
|
3580 | 3589 | This argument is ignored, no welcome message will be displayed. |
|
3581 | 3590 | """ |
|
3582 | 3591 | from IPython.core.pylabtools import import_pylab |
|
3583 | 3592 | |
|
3584 | 3593 | gui, backend = self.enable_matplotlib(gui) |
|
3585 | 3594 | |
|
3586 | 3595 | # We want to prevent the loading of pylab to pollute the user's |
|
3587 | 3596 | # namespace as shown by the %who* magics, so we execute the activation |
|
3588 | 3597 | # code in an empty namespace, and we update *both* user_ns and |
|
3589 | 3598 | # user_ns_hidden with this information. |
|
3590 | 3599 | ns = {} |
|
3591 | 3600 | import_pylab(ns, import_all) |
|
3592 | 3601 | # warn about clobbered names |
|
3593 | 3602 | ignored = {"__builtins__"} |
|
3594 | 3603 | both = set(ns).intersection(self.user_ns).difference(ignored) |
|
3595 | 3604 | clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ] |
|
3596 | 3605 | self.user_ns.update(ns) |
|
3597 | 3606 | self.user_ns_hidden.update(ns) |
|
3598 | 3607 | return gui, backend, clobbered |
|
3599 | 3608 | |
|
3600 | 3609 | #------------------------------------------------------------------------- |
|
3601 | 3610 | # Utilities |
|
3602 | 3611 | #------------------------------------------------------------------------- |
|
3603 | 3612 | |
|
3604 | 3613 | def var_expand(self, cmd, depth=0, formatter=DollarFormatter()): |
|
3605 | 3614 | """Expand python variables in a string. |
|
3606 | 3615 | |
|
3607 | 3616 | The depth argument indicates how many frames above the caller should |
|
3608 | 3617 | be walked to look for the local namespace where to expand variables. |
|
3609 | 3618 | |
|
3610 | 3619 | The global namespace for expansion is always the user's interactive |
|
3611 | 3620 | namespace. |
|
3612 | 3621 | """ |
|
3613 | 3622 | ns = self.user_ns.copy() |
|
3614 | 3623 | try: |
|
3615 | 3624 | frame = sys._getframe(depth+1) |
|
3616 | 3625 | except ValueError: |
|
3617 | 3626 | # This is thrown if there aren't that many frames on the stack, |
|
3618 | 3627 | # e.g. if a script called run_line_magic() directly. |
|
3619 | 3628 | pass |
|
3620 | 3629 | else: |
|
3621 | 3630 | ns.update(frame.f_locals) |
|
3622 | 3631 | |
|
3623 | 3632 | try: |
|
3624 | 3633 | # We have to use .vformat() here, because 'self' is a valid and common |
|
3625 | 3634 | # name, and expanding **ns for .format() would make it collide with |
|
3626 | 3635 | # the 'self' argument of the method. |
|
3627 | 3636 | cmd = formatter.vformat(cmd, args=[], kwargs=ns) |
|
3628 | 3637 | except Exception: |
|
3629 | 3638 | # if formatter couldn't format, just let it go untransformed |
|
3630 | 3639 | pass |
|
3631 | 3640 | return cmd |
|
3632 | 3641 | |
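The substitution itself is performed by the same ``DollarFormatter`` used above (it lives in ``IPython.utils.text``); a small sketch of what ``var_expand`` produces for a shell-style command line::

    from IPython.utils.text import DollarFormatter

    fmt = DollarFormatter()
    ns = {"n": 3, "filename": "data.csv"}
    print(fmt.vformat("head -$n $filename", args=[], kwargs=ns))
    # -> head -3 data.csv   (a literal dollar sign can be written as $$)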
|
3633 | 3642 | def mktempfile(self, data=None, prefix='ipython_edit_'): |
|
3634 | 3643 | """Make a new tempfile and return its filename. |
|
3635 | 3644 | |
|
3636 | 3645 | This makes a call to tempfile.mkstemp (created in a tempfile.mkdtemp), |
|
3637 | 3646 | but it registers the created filename internally so ipython cleans it up |
|
3638 | 3647 | at exit time. |
|
3639 | 3648 | |
|
3640 | 3649 | Optional inputs: |
|
3641 | 3650 | |
|
3642 | 3651 | - data(None): if data is given, it gets written out to the temp file |
|
3643 | 3652 | immediately, and the file is closed again.""" |
|
3644 | 3653 | |
|
3645 | 3654 | dir_path = Path(tempfile.mkdtemp(prefix=prefix)) |
|
3646 | 3655 | self.tempdirs.append(dir_path) |
|
3647 | 3656 | |
|
3648 | 3657 | handle, filename = tempfile.mkstemp(".py", prefix, dir=str(dir_path)) |
|
3649 | 3658 | os.close(handle) # On Windows, there can only be one open handle on a file |
|
3650 | 3659 | |
|
3651 | 3660 | file_path = Path(filename) |
|
3652 | 3661 | self.tempfiles.append(file_path) |
|
3653 | 3662 | |
|
3654 | 3663 | if data: |
|
3655 | 3664 | file_path.write_text(data, encoding="utf-8") |
|
3656 | 3665 | return filename |
|
3657 | 3666 | |
|
3658 | 3667 | def ask_yes_no(self, prompt, default=None, interrupt=None): |
|
3659 | 3668 | if self.quiet: |
|
3660 | 3669 | return True |
|
3661 | 3670 | return ask_yes_no(prompt,default,interrupt) |
|
3662 | 3671 | |
|
3663 | 3672 | def show_usage(self): |
|
3664 | 3673 | """Show a usage message""" |
|
3665 | 3674 | page.page(IPython.core.usage.interactive_usage) |
|
3666 | 3675 | |
|
3667 | 3676 | def extract_input_lines(self, range_str, raw=False): |
|
3668 | 3677 | """Return as a string a set of input history slices. |
|
3669 | 3678 | |
|
3670 | 3679 | Parameters |
|
3671 | 3680 | ---------- |
|
3672 | 3681 | range_str : str |
|
3673 | 3682 | The set of slices is given as a string, like "~5/6-~4/2 4:8 9", |
|
3674 | 3683 | since this function is for use by magic functions which get their |
|
3675 | 3684 | arguments as strings. The number before the / is the session |
|
3676 | 3685 | number: ~n goes n back from the current session. |
|
3677 | 3686 | |
|
3678 | 3687 | If empty string is given, returns history of current session |
|
3679 | 3688 | without the last input. |
|
3680 | 3689 | |
|
3681 | 3690 | raw : bool, optional |
|
3682 | 3691 | By default, the processed input is used. If this is true, the raw |
|
3683 | 3692 | input history is used instead. |
|
3684 | 3693 | |
|
3685 | 3694 | Notes |
|
3686 | 3695 | ----- |
|
3687 | 3696 | Slices can be described with two notations: |
|
3688 | 3697 | |
|
3689 | 3698 | * ``N:M`` -> standard python form, means including items N...(M-1). |
|
3690 | 3699 | * ``N-M`` -> include items N..M (closed endpoint). |
|
3691 | 3700 | """ |
|
3692 | 3701 | lines = self.history_manager.get_range_by_str(range_str, raw=raw) |
|
3693 | 3702 | text = "\n".join(x for _, _, x in lines) |
|
3694 | 3703 | |
|
3695 | 3704 | # Skip the last line, as it's probably the magic that called this |
|
3696 | 3705 | if not range_str: |
|
3697 | 3706 | if "\n" not in text: |
|
3698 | 3707 | text = "" |
|
3699 | 3708 | else: |
|
3700 | 3709 | text = text[: text.rfind("\n")] |
|
3701 | 3710 | |
|
3702 | 3711 | return text |
|
3703 | 3712 | |
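Hypothetical calls illustrating the two slice notations described above; they assume a running session with at least eight inputs and at least one previous session in the history database::

    from IPython import get_ipython

    ip = get_ipython()                     # None outside a running IPython
    if ip is not None:
        ip.extract_input_lines("4:8")      # inputs 4..7 (Python half-open slice)
        ip.extract_input_lines("4-8")      # inputs 4..8 (closed endpoint)
        ip.extract_input_lines("~1/1-5", raw=True)  # previous session, raw history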
|
3704 | 3713 | def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False): |
|
3705 | 3714 | """Get a code string from history, file, url, or a string or macro. |
|
3706 | 3715 | |
|
3707 | 3716 | This is mainly used by magic functions. |
|
3708 | 3717 | |
|
3709 | 3718 | Parameters |
|
3710 | 3719 | ---------- |
|
3711 | 3720 | target : str |
|
3712 | 3721 | A string specifying code to retrieve. This will be tried respectively |
|
3713 | 3722 | as: ranges of input history (see %history for syntax), url, |
|
3714 | 3723 | corresponding .py file, filename, or an expression evaluating to a |
|
3715 | 3724 | string or Macro in the user namespace. |
|
3716 | 3725 | |
|
3717 | 3726 | If empty string is given, returns complete history of current |
|
3718 | 3727 | session, without the last line. |
|
3719 | 3728 | |
|
3720 | 3729 | raw : bool |
|
3721 | 3730 | If true (default), retrieve raw history. Has no effect on the other |
|
3722 | 3731 | retrieval mechanisms. |
|
3723 | 3732 | |
|
3724 | 3733 | py_only : bool (default False) |
|
3725 | 3734 | Only try to fetch python code, do not try alternative methods to decode file |
|
3726 | 3735 | if unicode fails. |
|
3727 | 3736 | |
|
3728 | 3737 | Returns |
|
3729 | 3738 | ------- |
|
3730 | 3739 | A string of code. |
|
3731 | 3740 | ValueError is raised if nothing is found, and TypeError if it evaluates |
|
3732 | 3741 | to an object of another type. In each case, .args[0] is a printable |
|
3733 | 3742 | message. |
|
3734 | 3743 | """ |
|
3735 | 3744 | code = self.extract_input_lines(target, raw=raw) # Grab history |
|
3736 | 3745 | if code: |
|
3737 | 3746 | return code |
|
3738 | 3747 | try: |
|
3739 | 3748 | if target.startswith(('http://', 'https://')): |
|
3740 | 3749 | return openpy.read_py_url(target, skip_encoding_cookie=skip_encoding_cookie) |
|
3741 | 3750 | except UnicodeDecodeError as e: |
|
3742 | 3751 | if not py_only : |
|
3743 | 3752 | # Deferred import |
|
3744 | 3753 | from urllib.request import urlopen |
|
3745 | 3754 | response = urlopen(target) |
|
3746 | 3755 | return response.read().decode('latin1') |
|
3747 | 3756 | raise ValueError(("'%s' seems to be unreadable.") % target) from e |
|
3748 | 3757 | |
|
3749 | 3758 | potential_target = [target] |
|
3750 | 3759 | try : |
|
3751 | 3760 | potential_target.insert(0,get_py_filename(target)) |
|
3752 | 3761 | except IOError: |
|
3753 | 3762 | pass |
|
3754 | 3763 | |
|
3755 | 3764 | for tgt in potential_target : |
|
3756 | 3765 | if os.path.isfile(tgt): # Read file |
|
3757 | 3766 | try : |
|
3758 | 3767 | return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie) |
|
3759 | 3768 | except UnicodeDecodeError as e: |
|
3760 | 3769 | if not py_only : |
|
3761 | 3770 | with io_open(tgt,'r', encoding='latin1') as f : |
|
3762 | 3771 | return f.read() |
|
3763 | 3772 | raise ValueError(("'%s' seems to be unreadable.") % target) from e |
|
3764 | 3773 | elif os.path.isdir(os.path.expanduser(tgt)): |
|
3765 | 3774 | raise ValueError("'%s' is a directory, not a regular file." % target) |
|
3766 | 3775 | |
|
3767 | 3776 | if search_ns: |
|
3768 | 3777 | # Inspect namespace to load object source |
|
3769 | 3778 | object_info = self.object_inspect(target, detail_level=1) |
|
3770 | 3779 | if object_info['found'] and object_info['source']: |
|
3771 | 3780 | return object_info['source'] |
|
3772 | 3781 | |
|
3773 | 3782 | try: # User namespace |
|
3774 | 3783 | codeobj = eval(target, self.user_ns) |
|
3775 | 3784 | except Exception as e: |
|
3776 | 3785 | raise ValueError(("'%s' was not found in history, as a file, url, " |
|
3777 | 3786 | "nor in the user namespace.") % target) from e |
|
3778 | 3787 | |
|
3779 | 3788 | if isinstance(codeobj, str): |
|
3780 | 3789 | return codeobj |
|
3781 | 3790 | elif isinstance(codeobj, Macro): |
|
3782 | 3791 | return codeobj.value |
|
3783 | 3792 | |
|
3784 | 3793 | raise TypeError("%s is neither a string nor a macro." % target, |
|
3785 | 3794 | codeobj) |
|
3786 | 3795 | |
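A hedged sketch of the lookup order documented above -- history range first, then URL, then file on disk, then an expression in the user namespace. The file and variable names here are made up for illustration::

    from IPython import get_ipython

    ip = get_ipython()                          # None outside a running IPython
    if ip is not None:
        ip.find_user_code("1-3")                # input history lines 1..3
        ip.find_user_code("myscript.py")        # a file on disk (hypothetical name)
        ip.find_user_code("code_string_var")    # a str or Macro in user_ns (hypothetical)
        # each call raises ValueError if the target cannot be resolved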
|
3787 | 3796 | def _atexit_once(self): |
|
3788 | 3797 | """ |
|
3789 | 3798 | At-exit operations that need to be called at most once. |
|
3790 | 3799 | A second call to this function per instance will do nothing. |
|
3791 | 3800 | """ |
|
3792 | 3801 | |
|
3793 | 3802 | if not getattr(self, "_atexit_once_called", False): |
|
3794 | 3803 | self._atexit_once_called = True |
|
3795 | 3804 | # Clear all user namespaces to release all references cleanly. |
|
3796 | 3805 | self.reset(new_session=False) |
|
3797 | 3806 | # Close the history session (this stores the end time and line count) |
|
3798 | 3807 | # this must be *before* the tempfile cleanup, in case of temporary |
|
3799 | 3808 | # history db |
|
3800 | 3809 | self.history_manager.end_session() |
|
3801 | 3810 | self.history_manager = None |
|
3802 | 3811 | |
|
3803 | 3812 | #------------------------------------------------------------------------- |
|
3804 | 3813 | # Things related to IPython exiting |
|
3805 | 3814 | #------------------------------------------------------------------------- |
|
3806 | 3815 | def atexit_operations(self): |
|
3807 | 3816 | """This will be executed at the time of exit. |
|
3808 | 3817 | |
|
3809 | 3818 | Cleanup operations and saving of persistent data that is done |
|
3810 | 3819 | unconditionally by IPython should be performed here. |
|
3811 | 3820 | |
|
3812 | 3821 | For things that may depend on startup flags or platform specifics (such |
|
3813 | 3822 | as having readline or not), register a separate atexit function in the |
|
3814 | 3823 | code that has the appropriate information, rather than trying to |
|
3815 | 3824 | clutter this method with platform-specific logic. |
|
3816 | 3825 | """ |
|
3817 | 3826 | self._atexit_once() |
|
3818 | 3827 | |
|
3819 | 3828 | # Cleanup all tempfiles and folders left around |
|
3820 | 3829 | for tfile in self.tempfiles: |
|
3821 | 3830 | try: |
|
3822 | 3831 | tfile.unlink() |
|
3823 | 3832 | self.tempfiles.remove(tfile) |
|
3824 | 3833 | except FileNotFoundError: |
|
3825 | 3834 | pass |
|
3826 | 3835 | del self.tempfiles |
|
3827 | 3836 | for tdir in self.tempdirs: |
|
3828 | 3837 | try: |
|
3829 | 3838 | tdir.rmdir() |
|
3830 | 3839 | self.tempdirs.remove(tdir) |
|
3831 | 3840 | except FileNotFoundError: |
|
3832 | 3841 | pass |
|
3833 | 3842 | del self.tempdirs |
|
3834 | 3843 | |
|
3835 | 3844 | # Restore user's cursor |
|
3836 | 3845 | if hasattr(self, "editing_mode") and self.editing_mode == "vi": |
|
3837 | 3846 | sys.stdout.write("\x1b[0 q") |
|
3838 | 3847 | sys.stdout.flush() |
|
3839 | 3848 | |
|
3840 | 3849 | def cleanup(self): |
|
3841 | 3850 | self.restore_sys_module_state() |
|
3842 | 3851 | |
|
3843 | 3852 | |
|
3844 | 3853 | # Overridden in terminal subclass to change prompts |
|
3845 | 3854 | def switch_doctest_mode(self, mode): |
|
3846 | 3855 | pass |
|
3847 | 3856 | |
|
3848 | 3857 | |
|
3849 | 3858 | class InteractiveShellABC(metaclass=abc.ABCMeta): |
|
3850 | 3859 | """An abstract base class for InteractiveShell.""" |
|
3851 | 3860 | |
|
3852 | 3861 | InteractiveShellABC.register(InteractiveShell) |
@@ -1,212 +1,140 b'' | |||
|
1 | 1 | """Implementation of configuration-related magic functions. |
|
2 | 2 | """ |
|
3 | 3 | #----------------------------------------------------------------------------- |
|
4 | 4 | # Copyright (c) 2012 The IPython Development Team. |
|
5 | 5 | # |
|
6 | 6 | # Distributed under the terms of the Modified BSD License. |
|
7 | 7 | # |
|
8 | 8 | # The full license is in the file COPYING.txt, distributed with this software. |
|
9 | 9 | #----------------------------------------------------------------------------- |
|
10 | 10 | |
|
11 | 11 | #----------------------------------------------------------------------------- |
|
12 | 12 | # Imports |
|
13 | 13 | #----------------------------------------------------------------------------- |
|
14 | 14 | |
|
15 | 15 | # Stdlib |
|
16 | 16 | import re |
|
17 | 17 | |
|
18 | 18 | # Our own packages |
|
19 | 19 | from IPython.core.error import UsageError |
|
20 | 20 | from IPython.core.magic import Magics, magics_class, line_magic |
|
21 | 21 | from logging import error |
|
22 | 22 | |
|
23 | 23 | #----------------------------------------------------------------------------- |
|
24 | 24 | # Magic implementation classes |
|
25 | 25 | #----------------------------------------------------------------------------- |
|
26 | 26 | |
|
27 | 27 | reg = re.compile(r'^\w+\.\w+$') |
|
28 | 28 | @magics_class |
|
29 | 29 | class ConfigMagics(Magics): |
|
30 | 30 | |
|
31 | 31 | def __init__(self, shell): |
|
32 | 32 | super(ConfigMagics, self).__init__(shell) |
|
33 | 33 | self.configurables = [] |
|
34 | 34 | |
|
35 | 35 | @line_magic |
|
36 | 36 | def config(self, s): |
|
37 | 37 | """configure IPython |
|
38 | 38 | |
|
39 | 39 | %config Class[.trait=value] |
|
40 | 40 | |
|
41 | 41 | This magic exposes most of the IPython config system. Any |
|
42 | 42 | Configurable class should be able to be configured with the simple |
|
43 | 43 | line:: |
|
44 | 44 | |
|
45 | 45 | %config Class.trait=value |
|
46 | 46 | |
|
47 | 47 | Where `value` will be resolved in the user's namespace, if it is an |
|
48 | 48 | expression or variable name. |
|
49 | 49 | |
|
50 | 50 | Examples |
|
51 | 51 | -------- |
|
52 | 52 | |
|
53 | 53 | To see what classes are available for config, pass no arguments:: |
|
54 | 54 | |
|
55 | 55 | In [1]: %config |
|
56 | 56 | Available objects for config: |
|
57 | 57 | AliasManager |
|
58 | 58 | DisplayFormatter |
|
59 | 59 | HistoryManager |
|
60 | 60 | IPCompleter |
|
61 | 61 | LoggingMagics |
|
62 | 62 | MagicsManager |
|
63 | 63 | OSMagics |
|
64 | 64 | PrefilterManager |
|
65 | 65 | ScriptMagics |
|
66 | 66 | TerminalInteractiveShell |
|
67 | 67 | |
|
68 | 68 | To view what is configurable on a given class, just pass the class |
|
69 | 69 | name:: |
|
70 | 70 | |
|
71 | In [2]: %config IPCompleter |
|
72 | IPCompleter(Completer) options |
|
73 | --------------------------- |
|
74 | IPCompleter.backslash_combining_completions=<Bool> | |
|
75 | Enable unicode completions, e.g. \\alpha<tab> . Includes completion of latex | |
|
76 | commands, unicode names, and expanding unicode characters back to latex | |
|
77 | commands. | |
|
78 | Current: True | |
|
79 | IPCompleter.debug=<Bool> | |
|
80 | Enable debug for the Completer. Mostly print extra information for | |
|
81 | experimental jedi integration. | |
|
71 | In [2]: %config LoggingMagics | |
|
72 | LoggingMagics(Magics) options | |
|
73 | --------------------------- | |
|
74 | LoggingMagics.quiet=<Bool> | |
|
75 | Suppress output of log state when logging is enabled | |
|
82 | 76 | Current: False |
|
83 | IPCompleter.disable_matchers=<list-item-1>... | |
|
84 | List of matchers to disable. | |
|
85 | The list should contain matcher identifiers (see | |
|
86 | :any:`completion_matcher`). | |
|
87 | Current: [] | |
|
88 | IPCompleter.greedy=<Bool> | |
|
89 | Activate greedy completion | |
|
90 | PENDING DEPRECATION. this is now mostly taken care of with Jedi. | |
|
91 | This will enable completion on elements of lists, results of function calls, etc., | |
|
92 | but can be unsafe because the code is actually evaluated on TAB. | |
|
93 | Current: False | |
|
94 | IPCompleter.jedi_compute_type_timeout=<Int> | |
|
95 | Experimental: restrict time (in milliseconds) during which Jedi can compute types. | |
|
96 | Set to 0 to stop computing types. Non-zero value lower than 100ms may hurt | |
|
97 | performance by preventing jedi to build its cache. | |
|
98 | Current: 400 | |
|
99 | IPCompleter.limit_to__all__=<Bool> | |
|
100 | DEPRECATED as of version 5.0. | |
|
101 | Instruct the completer to use __all__ for the completion | |
|
102 | Specifically, when completing on ``object.<tab>``. | |
|
103 | When True: only those names in obj.__all__ will be included. | |
|
104 | When False [default]: the __all__ attribute is ignored | |
|
105 | Current: False | |
|
106 | IPCompleter.merge_completions=<Bool> | |
|
107 | Whether to merge completion results into a single list | |
|
108 | If False, only the completion results from the first non-empty | |
|
109 | completer will be returned. | |
|
110 | As of version 8.6.0, setting the value to ``False`` is an alias for: | |
|
111 | ``IPCompleter.suppress_competing_matchers = True.``. | |
|
112 | Current: True | |
|
113 | IPCompleter.omit__names=<Enum> | |
|
114 | Instruct the completer to omit private method names | |
|
115 | Specifically, when completing on ``object.<tab>``. | |
|
116 | When 2 [default]: all names that start with '_' will be excluded. | |
|
117 | When 1: all 'magic' names (``__foo__``) will be excluded. | |
|
118 | When 0: nothing will be excluded. | |
|
119 | Choices: any of [0, 1, 2] | |
|
120 | Current: 2 | |
|
121 | IPCompleter.profile_completions=<Bool> | |
|
122 | If True, emit profiling data for completion subsystem using cProfile. | |
|
123 | Current: False | |
|
124 | IPCompleter.profiler_output_dir=<Unicode> | |
|
125 | Template for path at which to output profile data for completions. | |
|
126 | Current: '.completion_profiles' | |
|
127 | IPCompleter.suppress_competing_matchers=<Union> | |
|
128 | Whether to suppress completions from other *Matchers*. | |
|
129 | When set to ``None`` (default) the matchers will attempt to auto-detect | |
|
130 | whether suppression of other matchers is desirable. For example, at the | |
|
131 | beginning of a line followed by `%` we expect a magic completion to be the | |
|
132 | only applicable option, and after ``my_dict['`` we usually expect a | |
|
133 | completion with an existing dictionary key. | |
|
134 | If you want to disable this heuristic and see completions from all matchers, | |
|
135 | set ``IPCompleter.suppress_competing_matchers = False``. To disable the | |
|
136 | heuristic for specific matchers provide a dictionary mapping: | |
|
137 | ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher': | |
|
138 | False}``. | |
|
139 | Set ``IPCompleter.suppress_competing_matchers = True`` to limit completions | |
|
140 | to the set of matchers with the highest priority; this is equivalent to | |
|
141 | ``IPCompleter.merge_completions`` and can be beneficial for performance, but | |
|
142 | will sometimes omit relevant candidates from matchers further down the | |
|
143 | priority list. | |
|
144 | Current: None | |
|
145 | IPCompleter.use_jedi=<Bool> | |
|
146 | Experimental: Use Jedi to generate autocompletions. Default to True if jedi | |
|
147 | is installed. | |
|
148 | Current: True | |
|
149 | 77 | |
|
150 | 78 | but the real use is in setting values:: |
|
151 | 79 | |
|
152 | In [3]: %config IPCompleter.greedy = True |
|
80 | In [3]: %config LoggingMagics.quiet = True | |
|
153 | 81 | |
|
154 | 82 | and these values are read from the user_ns if they are variables:: |
|
155 | 83 | |
|
156 | In [4]: feeling_greedy=False |
|
84 | In [4]: feeling_quiet=False | |
|
157 | 85 | |
|
158 | In [5]: %config IPCompleter.greedy = feeling_greedy |
|
86 | In [5]: %config LoggingMagics.quiet = feeling_quiet | |
|
159 | 87 | |
|
160 | 88 | """ |
|
161 | 89 | from traitlets.config.loader import Config |
|
162 | 90 | # some IPython objects are Configurable, but do not yet have |
|
163 | 91 | # any configurable traits. Exclude them from the effects of |
|
164 | 92 | # this magic, as their presence is just noise: |
|
165 | 93 | configurables = sorted(set([ c for c in self.shell.configurables |
|
166 | 94 | if c.__class__.class_traits(config=True) |
|
167 | 95 | ]), key=lambda x: x.__class__.__name__) |
|
168 | 96 | classnames = [ c.__class__.__name__ for c in configurables ] |
|
169 | 97 | |
|
170 | 98 | line = s.strip() |
|
171 | 99 | if not line: |
|
172 | 100 | # print available configurable names |
|
173 | 101 | print("Available objects for config:") |
|
174 | 102 | for name in classnames: |
|
175 | 103 | print(" ", name) |
|
176 | 104 | return |
|
177 | 105 | elif line in classnames: |
|
178 | 106 | # `%config TerminalInteractiveShell` will print trait info for |
|
179 | 107 | # TerminalInteractiveShell |
|
180 | 108 | c = configurables[classnames.index(line)] |
|
181 | 109 | cls = c.__class__ |
|
182 | 110 | help = cls.class_get_help(c) |
|
183 | 111 | # strip leading '--' from cl-args: |
|
184 | 112 | help = re.sub(re.compile(r'^--', re.MULTILINE), '', help) |
|
185 | 113 | print(help) |
|
186 | 114 | return |
|
187 | 115 | elif reg.match(line): |
|
188 | 116 | cls, attr = line.split('.') |
|
189 | 117 | return getattr(configurables[classnames.index(cls)],attr) |
|
190 | 118 | elif '=' not in line: |
|
191 | 119 | msg = "Invalid config statement: %r, "\ |
|
192 | 120 | "should be `Class.trait = value`." |
|
193 | 121 | |
|
194 | 122 | ll = line.lower() |
|
195 | 123 | for classname in classnames: |
|
196 | 124 | if ll == classname.lower(): |
|
197 | 125 | msg = msg + '\nDid you mean %s (note the case)?' % classname |
|
198 | 126 | break |
|
199 | 127 | |
|
200 | 128 | raise UsageError( msg % line) |
|
201 | 129 | |
|
202 | 130 | # otherwise, assume we are setting configurables. |
|
203 | 131 | # leave quotes on args when splitting, because we want |
|
204 | 132 | # unquoted args to eval in user_ns |
|
205 | 133 | cfg = Config() |
|
206 | 134 | exec("cfg."+line, self.shell.user_ns, locals()) |
|
207 | 135 | |
|
208 | 136 | for configurable in configurables: |
|
209 | 137 | try: |
|
210 | 138 | configurable.update_config(cfg) |
|
211 | 139 | except Exception as e: |
|
212 | 140 | error(e) |
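The assignment branch above builds a traitlets ``Config`` by ``exec``-ing ``cfg.<line>`` and then pushes it to every registered configurable. A self-contained sketch of that mechanism with a stand-in class; ``QuietDemo`` is hypothetical and not an IPython configurable::

    from traitlets import Bool
    from traitlets.config import Config, Configurable

    class QuietDemo(Configurable):
        quiet = Bool(False).tag(config=True)

    obj = QuietDemo()
    line = "QuietDemo.quiet = True"           # what the user typed after %config
    cfg = Config()
    exec("cfg." + line, {}, {"cfg": cfg})     # Config auto-creates the section
    obj.update_config(cfg)
    print(obj.quiet)                          # True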
@@ -1,362 +1,362 b'' | |||
|
1 | 1 | """Magic functions for running cells in various scripts.""" |
|
2 | 2 | |
|
3 | 3 | # Copyright (c) IPython Development Team. |
|
4 | 4 | # Distributed under the terms of the Modified BSD License. |
|
5 | 5 | |
|
6 | 6 | import asyncio |
|
7 | 7 | import atexit |
|
8 | 8 | import errno |
|
9 | 9 | import os |
|
10 | 10 | import signal |
|
11 | 11 | import sys |
|
12 | 12 | import time |
|
13 | 13 | from subprocess import CalledProcessError |
|
14 | 14 | from threading import Thread |
|
15 | 15 | |
|
16 | 16 | from traitlets import Any, Dict, List, default |
|
17 | 17 | |
|
18 | 18 | from IPython.core import magic_arguments |
|
19 | 19 | from IPython.core.async_helpers import _AsyncIOProxy |
|
20 | 20 | from IPython.core.magic import Magics, cell_magic, line_magic, magics_class |
|
21 | 21 | from IPython.utils.process import arg_split |
|
22 | 22 | |
|
23 | 23 | #----------------------------------------------------------------------------- |
|
24 | 24 | # Magic implementation classes |
|
25 | 25 | #----------------------------------------------------------------------------- |
|
26 | 26 | |
|
27 | 27 | def script_args(f): |
|
28 | 28 | """single decorator for adding script args""" |
|
29 | 29 | args = [ |
|
30 | 30 | magic_arguments.argument( |
|
31 | 31 | '--out', type=str, |
|
32 | 32 | help="""The variable in which to store stdout from the script. |
|
33 | 33 | If the script is backgrounded, this will be the stdout *pipe*, |
|
34 | 34 | instead of the stdout text itself and will not be auto closed. |
|
35 | 35 | """ |
|
36 | 36 | ), |
|
37 | 37 | magic_arguments.argument( |
|
38 | 38 | '--err', type=str, |
|
39 | 39 | help="""The variable in which to store stderr from the script. |
|
40 | 40 | If the script is backgrounded, this will be the stderr *pipe*, |
|
41 | 41 | instead of the stderr text itself and will not be autoclosed. |
|
42 | 42 | """ |
|
43 | 43 | ), |
|
44 | 44 | magic_arguments.argument( |
|
45 | 45 | '--bg', action="store_true", |
|
46 | 46 | help="""Whether to run the script in the background. |
|
47 | 47 | If given, the only way to see the output of the command is |
|
48 | 48 | with --out/err. |
|
49 | 49 | """ |
|
50 | 50 | ), |
|
51 | 51 | magic_arguments.argument( |
|
52 | 52 | '--proc', type=str, |
|
53 | 53 | help="""The variable in which to store Popen instance. |
|
54 | 54 | This is used only when --bg option is given. |
|
55 | 55 | """ |
|
56 | 56 | ), |
|
57 | 57 | magic_arguments.argument( |
|
58 | 58 | '--no-raise-error', action="store_false", dest='raise_error', |
|
59 | 59 | help="""Whether you should raise an error message in addition to |
|
60 | 60 | a stream on stderr if you get a nonzero exit code. |
|
61 | 61 | """, |
|
62 | 62 | ), |
|
63 | 63 | ] |
|
64 | 64 | for arg in args: |
|
65 | 65 | f = arg(f) |
|
66 | 66 | return f |
|
67 | 67 | |
|
68 | 68 | |
|
69 | 69 | @magics_class |
|
70 | 70 | class ScriptMagics(Magics): |
|
71 | 71 | """Magics for talking to scripts |
|
72 | 72 | |
|
73 | 73 | This defines a base `%%script` cell magic for running a cell |
|
74 | 74 | with a program in a subprocess, and registers a few top-level |
|
75 | 75 | magics that call %%script with common interpreters. |
|
76 | 76 | """ |
|
77 | 77 | |
|
78 | 78 | event_loop = Any( |
|
79 | 79 | help=""" |
|
80 | 80 | The event loop on which to run subprocesses |
|
81 | 81 | |
|
82 | 82 | Not the main event loop, |
|
83 | 83 | because we want to be able to make blocking calls |
|
84 | 84 | and have certain requirements we don't want to impose on the main loop. |
|
85 | 85 | """ |
|
86 | 86 | ) |
|
87 | 87 | |
|
88 | 88 | script_magics = List( |
|
89 | 89 | help="""Extra script cell magics to define |
|
90 | 90 | |
|
91 | 91 | This generates simple wrappers of `%%script foo` as `%%foo`. |
|
92 | 92 | |
|
93 | 93 | If you want to add script magics that aren't on your path, |
|
94 | 94 | specify them in script_paths |
|
95 | 95 | """, |
|
96 | 96 | ).tag(config=True) |
|
97 | 97 | @default('script_magics') |
|
98 | 98 | def _script_magics_default(self): |
|
99 | 99 | """default to a common list of programs""" |
|
100 | 100 | |
|
101 | 101 | defaults = [ |
|
102 | 102 | 'sh', |
|
103 | 103 | 'bash', |
|
104 | 104 | 'perl', |
|
105 | 105 | 'ruby', |
|
106 | 106 | 'python', |
|
107 | 107 | 'python2', |
|
108 | 108 | 'python3', |
|
109 | 109 | 'pypy', |
|
110 | 110 | ] |
|
111 | 111 | if os.name == 'nt': |
|
112 | 112 | defaults.extend([ |
|
113 | 113 | 'cmd', |
|
114 | 114 | ]) |
|
115 | 115 | |
|
116 | 116 | return defaults |
|
117 | 117 | |
|
118 | 118 | script_paths = Dict( |
|
119 | 119 | help="""Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby' |
|
120 | 120 | |
|
121 | 121 | Only necessary for items in script_magics where the default path will not |
|
122 | 122 | find the right interpreter. |
|
123 | 123 | """ |
|
124 | 124 | ).tag(config=True) |
|
125 | 125 | |
|
126 | 126 | def __init__(self, shell=None): |
|
127 | 127 | super(ScriptMagics, self).__init__(shell=shell) |
|
128 | 128 | self._generate_script_magics() |
|
129 | 129 | self.bg_processes = [] |
|
130 | 130 | atexit.register(self.kill_bg_processes) |
|
131 | 131 | |
|
132 | 132 | def __del__(self): |
|
133 | 133 | self.kill_bg_processes() |
|
134 | 134 | |
|
135 | 135 | def _generate_script_magics(self): |
|
136 | 136 | cell_magics = self.magics['cell'] |
|
137 | 137 | for name in self.script_magics: |
|
138 | 138 | cell_magics[name] = self._make_script_magic(name) |
|
139 | 139 | |
|
140 | 140 | def _make_script_magic(self, name): |
|
141 | 141 | """make a named magic, that calls %%script with a particular program""" |
|
142 | 142 | # expand to explicit path if necessary: |
|
143 | 143 | script = self.script_paths.get(name, name) |
|
144 | 144 | |
|
145 | 145 | @magic_arguments.magic_arguments() |
|
146 | 146 | @script_args |
|
147 | 147 | def named_script_magic(line, cell): |
|
148 | 148 | # if line, add it as cl-flags |
|
149 | 149 | if line: |
|
150 | 150 | line = "%s %s" % (script, line) |
|
151 | 151 | else: |
|
152 | 152 | line = script |
|
153 | 153 | return self.shebang(line, cell) |
|
154 | 154 | |
|
155 | 155 | # write a basic docstring: |
|
156 | 156 | named_script_magic.__doc__ = \ |
|
157 | 157 | """%%{name} script magic |
|
158 | 158 | |
|
159 | 159 | Run cells with {script} in a subprocess. |
|
160 | 160 | |
|
161 | 161 | This is a shortcut for `%%script {script}` |
|
162 | 162 | """.format(**locals()) |
|
163 | 163 | |
|
164 | 164 | return named_script_magic |
|
165 | 165 | |
|
166 | 166 | @magic_arguments.magic_arguments() |
|
167 | 167 | @script_args |
|
168 | 168 | @cell_magic("script") |
|
169 | 169 | def shebang(self, line, cell): |
|
170 | 170 | """Run a cell via a shell command |
|
171 | 171 | |
|
172 | 172 | The `%%script` line is like the #! line of script, |
|
173 | 173 | specifying a program (bash, perl, ruby, etc.) with which to run. |
|
174 | 174 | |
|
175 | 175 | The rest of the cell is run by that program. |
|
176 | 176 | |
|
177 | 177 | Examples |
|
178 | 178 | -------- |
|
179 | 179 | :: |
|
180 | 180 | |
|
181 | 181 | In [1]: %%script bash |
|
182 | 182 | ...: for i in 1 2 3; do |
|
183 | 183 | ...: echo $i |
|
184 | 184 | ...: done |
|
185 | 185 | 1 |
|
186 | 186 | 2 |
|
187 | 187 | 3 |
|
188 | 188 | """ |
|
189 | 189 | |
|
190 | 190 | # Create the event loop in which to run script magics |
|
191 | 191 | # this operates on a background thread |
|
192 | 192 | if self.event_loop is None: |
|
193 | 193 | if sys.platform == "win32": |
|
194 | 194 | # don't override the current policy, |
|
195 | 195 | # just create an event loop |
|
196 | 196 | event_loop = asyncio.WindowsProactorEventLoopPolicy().new_event_loop() |
|
197 | 197 | else: |
|
198 | 198 | event_loop = asyncio.new_event_loop() |
|
199 | 199 | self.event_loop = event_loop |
|
200 | 200 | |
|
201 | 201 | # start the loop in a background thread |
|
202 | 202 | asyncio_thread = Thread(target=event_loop.run_forever, daemon=True) |
|
203 | 203 | asyncio_thread.start() |
|
204 | 204 | else: |
|
205 | 205 | event_loop = self.event_loop |
|
206 | 206 | |
|
207 | 207 | def in_thread(coro): |
|
208 | 208 | """Call a coroutine on the asyncio thread""" |
|
209 | 209 | return asyncio.run_coroutine_threadsafe(coro, event_loop).result() |
|
210 | 210 | |
|
211 | 211 | async def _handle_stream(stream, stream_arg, file_object): |
|
212 | 212 | while True: |
|
213 | line = (await stream.readline()).decode("utf8") | |
|
213 | line = (await stream.readline()).decode("utf8", errors="replace") | |
|
214 | 214 | if not line: |
|
215 | 215 | break |
|
216 | 216 | if stream_arg: |
|
217 | 217 | self.shell.user_ns[stream_arg] = line |
|
218 | 218 | else: |
|
219 | 219 | file_object.write(line) |
|
220 | 220 | file_object.flush() |
|
221 | 221 | |
|
222 | 222 | async def _stream_communicate(process, cell): |
|
223 | 223 | process.stdin.write(cell) |
|
224 | 224 | process.stdin.close() |
|
225 | 225 | stdout_task = asyncio.create_task( |
|
226 | 226 | _handle_stream(process.stdout, args.out, sys.stdout) |
|
227 | 227 | ) |
|
228 | 228 | stderr_task = asyncio.create_task( |
|
229 | 229 | _handle_stream(process.stderr, args.err, sys.stderr) |
|
230 | 230 | ) |
|
231 | 231 | await asyncio.wait([stdout_task, stderr_task]) |
|
232 | 232 | await process.wait() |
|
233 | 233 | |
|
234 | 234 | argv = arg_split(line, posix=not sys.platform.startswith("win")) |
|
235 | 235 | args, cmd = self.shebang.parser.parse_known_args(argv) |
|
236 | 236 | |
|
237 | 237 | try: |
|
238 | 238 | p = in_thread( |
|
239 | 239 | asyncio.create_subprocess_exec( |
|
240 | 240 | *cmd, |
|
241 | 241 | stdout=asyncio.subprocess.PIPE, |
|
242 | 242 | stderr=asyncio.subprocess.PIPE, |
|
243 | 243 | stdin=asyncio.subprocess.PIPE, |
|
244 | 244 | ) |
|
245 | 245 | ) |
|
246 | 246 | except OSError as e: |
|
247 | 247 | if e.errno == errno.ENOENT: |
|
248 | 248 | print("Couldn't find program: %r" % cmd[0]) |
|
249 | 249 | return |
|
250 | 250 | else: |
|
251 | 251 | raise |
|
252 | 252 | |
|
253 | 253 | if not cell.endswith('\n'): |
|
254 | 254 | cell += '\n' |
|
255 | 255 | cell = cell.encode('utf8', 'replace') |
|
256 | 256 | if args.bg: |
|
257 | 257 | self.bg_processes.append(p) |
|
258 | 258 | self._gc_bg_processes() |
|
259 | 259 | to_close = [] |
|
260 | 260 | if args.out: |
|
261 | 261 | self.shell.user_ns[args.out] = _AsyncIOProxy(p.stdout, event_loop) |
|
262 | 262 | else: |
|
263 | 263 | to_close.append(p.stdout) |
|
264 | 264 | if args.err: |
|
265 | 265 | self.shell.user_ns[args.err] = _AsyncIOProxy(p.stderr, event_loop) |
|
266 | 266 | else: |
|
267 | 267 | to_close.append(p.stderr) |
|
268 | 268 | event_loop.call_soon_threadsafe( |
|
269 | 269 | lambda: asyncio.Task(self._run_script(p, cell, to_close)) |
|
270 | 270 | ) |
|
271 | 271 | if args.proc: |
|
272 | 272 | proc_proxy = _AsyncIOProxy(p, event_loop) |
|
273 | 273 | proc_proxy.stdout = _AsyncIOProxy(p.stdout, event_loop) |
|
274 | 274 | proc_proxy.stderr = _AsyncIOProxy(p.stderr, event_loop) |
|
275 | 275 | self.shell.user_ns[args.proc] = proc_proxy |
|
276 | 276 | return |
|
277 | 277 | |
|
278 | 278 | try: |
|
279 | 279 | in_thread(_stream_communicate(p, cell)) |
|
280 | 280 | except KeyboardInterrupt: |
|
281 | 281 | try: |
|
282 | 282 | p.send_signal(signal.SIGINT) |
|
283 | 283 | in_thread(asyncio.wait_for(p.wait(), timeout=0.1)) |
|
284 | 284 | if p.returncode is not None: |
|
285 | 285 | print("Process is interrupted.") |
|
286 | 286 | return |
|
287 | 287 | p.terminate() |
|
288 | 288 | in_thread(asyncio.wait_for(p.wait(), timeout=0.1)) |
|
289 | 289 | if p.returncode is not None: |
|
290 | 290 | print("Process is terminated.") |
|
291 | 291 | return |
|
292 | 292 | p.kill() |
|
293 | 293 | print("Process is killed.") |
|
294 | 294 | except OSError: |
|
295 | 295 | pass |
|
296 | 296 | except Exception as e: |
|
297 | 297 | print("Error while terminating subprocess (pid=%i): %s" % (p.pid, e)) |
|
298 | 298 | return |
|
299 | 299 | |
|
300 | 300 | if args.raise_error and p.returncode != 0: |
|
301 | 301 | # If we get here and p.returncode is still None, we must have |
|
302 | 302 | # killed it but not yet seen its return code. We don't wait for it, |
|
303 | 303 | # in case it's stuck in uninterruptible sleep. -9 = SIGKILL |
|
304 | 304 | rc = p.returncode or -9 |
|
305 | 305 | raise CalledProcessError(rc, cell) |
|
306 | 306 | |
|
307 | 307 | shebang.__skip_doctest__ = os.name != "posix" |
|
308 | 308 | |
|
309 | 309 | async def _run_script(self, p, cell, to_close): |
|
310 | 310 | """callback for running the script in the background""" |
|
311 | 311 | |
|
312 | 312 | p.stdin.write(cell) |
|
313 | 313 | await p.stdin.drain() |
|
314 | 314 | p.stdin.close() |
|
315 | 315 | await p.stdin.wait_closed() |
|
316 | 316 | await p.wait() |
|
317 | 317 | # asyncio read pipes have no close |
|
318 | 318 | # but we should drain the data anyway |
|
319 | 319 | for s in to_close: |
|
320 | 320 | await s.read() |
|
321 | 321 | self._gc_bg_processes() |
|
322 | 322 | |
|
323 | 323 | @line_magic("killbgscripts") |
|
324 | 324 | def killbgscripts(self, _nouse_=''): |
|
325 | 325 | """Kill all BG processes started by %%script and its family.""" |
|
326 | 326 | self.kill_bg_processes() |
|
327 | 327 | print("All background processes were killed.") |
|
328 | 328 | |
|
329 | 329 | def kill_bg_processes(self): |
|
330 | 330 | """Kill all BG processes which are still running.""" |
|
331 | 331 | if not self.bg_processes: |
|
332 | 332 | return |
|
333 | 333 | for p in self.bg_processes: |
|
334 | 334 | if p.returncode is None: |
|
335 | 335 | try: |
|
336 | 336 | p.send_signal(signal.SIGINT) |
|
337 | 337 | except: |
|
338 | 338 | pass |
|
339 | 339 | time.sleep(0.1) |
|
340 | 340 | self._gc_bg_processes() |
|
341 | 341 | if not self.bg_processes: |
|
342 | 342 | return |
|
343 | 343 | for p in self.bg_processes: |
|
344 | 344 | if p.returncode is None: |
|
345 | 345 | try: |
|
346 | 346 | p.terminate() |
|
347 | 347 | except: |
|
348 | 348 | pass |
|
349 | 349 | time.sleep(0.1) |
|
350 | 350 | self._gc_bg_processes() |
|
351 | 351 | if not self.bg_processes: |
|
352 | 352 | return |
|
353 | 353 | for p in self.bg_processes: |
|
354 | 354 | if p.returncode is None: |
|
355 | 355 | try: |
|
356 | 356 | p.kill() |
|
357 | 357 | except: |
|
358 | 358 | pass |
|
359 | 359 | self._gc_bg_processes() |
|
360 | 360 | |
|
361 | 361 | def _gc_bg_processes(self): |
|
362 | 362 | self.bg_processes = [p for p in self.bg_processes if p.returncode is None] |
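A self-contained sketch of the threading pattern ``shebang`` relies on: one asyncio event loop running forever on a daemon thread, with ``run_coroutine_threadsafe`` used to submit coroutines from the main thread and block on the result, mirroring the ``in_thread`` helper above::

    import asyncio
    from threading import Thread

    loop = asyncio.new_event_loop()
    Thread(target=loop.run_forever, daemon=True).start()

    def in_thread(coro):
        """Run *coro* on the background loop and wait for its result."""
        return asyncio.run_coroutine_threadsafe(coro, loop).result()

    async def sleepy_hello():
        await asyncio.sleep(0.1)
        return "hello from the background loop"

    print(in_thread(sleepy_hello()))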
@@ -1,1054 +1,1080 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | """Tools for inspecting Python objects. |
|
3 | 3 | |
|
4 | 4 | Uses syntax highlighting for presenting the various information elements. |
|
5 | 5 | |
|
6 | 6 | Similar in spirit to the inspect module, but all calls take a name argument to |
|
7 | 7 | reference the name under which an object is being read. |
|
8 | 8 | """ |
|
9 | 9 | |
|
10 | 10 | # Copyright (c) IPython Development Team. |
|
11 | 11 | # Distributed under the terms of the Modified BSD License. |
|
12 | 12 | |
|
13 | 13 | __all__ = ['Inspector','InspectColors'] |
|
14 | 14 | |
|
15 | 15 | # stdlib modules |
|
16 | 16 | import ast |
|
17 | 17 | import inspect |
|
18 | 18 | from inspect import signature |
|
19 | import html | |
|
19 | 20 | import linecache |
|
20 | 21 | import warnings |
|
21 | 22 | import os |
|
22 | 23 | from textwrap import dedent |
|
23 | 24 | import types |
|
24 | 25 | import io as stdlib_io |
|
25 | 26 | |
|
26 | 27 | from typing import Union |
|
27 | 28 | |
|
28 | 29 | # IPython's own |
|
29 | 30 | from IPython.core import page |
|
30 | 31 | from IPython.lib.pretty import pretty |
|
31 | 32 | from IPython.testing.skipdoctest import skip_doctest |
|
32 | 33 | from IPython.utils import PyColorize |
|
33 | 34 | from IPython.utils import openpy |
|
34 | 35 | from IPython.utils.dir2 import safe_hasattr |
|
35 | 36 | from IPython.utils.path import compress_user |
|
36 | 37 | from IPython.utils.text import indent |
|
37 | 38 | from IPython.utils.wildcard import list_namespace |
|
38 | 39 | from IPython.utils.wildcard import typestr2type |
|
39 | 40 | from IPython.utils.coloransi import TermColors, ColorScheme, ColorSchemeTable |
|
40 | 41 | from IPython.utils.py3compat import cast_unicode |
|
41 | 42 | from IPython.utils.colorable import Colorable |
|
42 | 43 | from IPython.utils.decorators import undoc |
|
43 | 44 | |
|
44 | 45 | from pygments import highlight |
|
45 | 46 | from pygments.lexers import PythonLexer |
|
46 | 47 | from pygments.formatters import HtmlFormatter |
|
47 | 48 | |
|
48 | 49 | def pylight(code): |
|
49 | 50 | return highlight(code, PythonLexer(), HtmlFormatter(noclasses=True)) |
|
50 | 51 | |
|
51 | 52 | # builtin docstrings to ignore |
|
52 | 53 | _func_call_docstring = types.FunctionType.__call__.__doc__ |
|
53 | 54 | _object_init_docstring = object.__init__.__doc__ |
|
54 | 55 | _builtin_type_docstrings = { |
|
55 | 56 | inspect.getdoc(t) for t in (types.ModuleType, types.MethodType, |
|
56 | 57 | types.FunctionType, property) |
|
57 | 58 | } |
|
58 | 59 | |
|
59 | 60 | _builtin_func_type = type(all) |
|
60 | 61 | _builtin_meth_type = type(str.upper) # Bound methods have the same type as builtin functions |
|
61 | 62 | #**************************************************************************** |
|
62 | 63 | # Builtin color schemes |
|
63 | 64 | |
|
64 | 65 | Colors = TermColors # just a shorthand |
|
65 | 66 | |
|
66 | 67 | InspectColors = PyColorize.ANSICodeColors |
|
67 | 68 | |
|
68 | 69 | #**************************************************************************** |
|
69 | 70 | # Auxiliary functions and objects |
|
70 | 71 | |
|
71 | 72 | # See the messaging spec for the definition of all these fields. This list |
|
72 | 73 | # effectively defines the order of display |
|
73 | 74 | info_fields = ['type_name', 'base_class', 'string_form', 'namespace', |
|
74 | 75 | 'length', 'file', 'definition', 'docstring', 'source', |
|
75 | 76 | 'init_definition', 'class_docstring', 'init_docstring', |
|
76 | 77 | 'call_def', 'call_docstring', |
|
77 | 78 | # These won't be printed but will be used to determine how to |
|
78 | 79 | # format the object |
|
79 | 80 | 'ismagic', 'isalias', 'isclass', 'found', 'name' |
|
80 | 81 | ] |
|
81 | 82 | |
|
82 | 83 | |
|
83 | 84 | def object_info(**kw): |
|
84 | 85 | """Make an object info dict with all fields present.""" |
|
85 | 86 | infodict = {k:None for k in info_fields} |
|
86 | 87 | infodict.update(kw) |
|
87 | 88 | return infodict |
|
88 | 89 | |
|
89 | 90 | |
|
90 | 91 | def get_encoding(obj): |
|
91 | 92 | """Get encoding for python source file defining obj |
|
92 | 93 | |
|
93 | 94 | Returns None if obj is not defined in a sourcefile. |
|
94 | 95 | """ |
|
95 | 96 | ofile = find_file(obj) |
|
96 | 97 | # run contents of file through pager starting at line where the object |
|
97 | 98 | # is defined, as long as the file isn't binary and is actually on the |
|
98 | 99 | # filesystem. |
|
99 | 100 | if ofile is None: |
|
100 | 101 | return None |
|
101 | 102 | elif ofile.endswith(('.so', '.dll', '.pyd')): |
|
102 | 103 | return None |
|
103 | 104 | elif not os.path.isfile(ofile): |
|
104 | 105 | return None |
|
105 | 106 | else: |
|
106 | 107 | # Print only text files, not extension binaries. Note that |
|
107 | 108 | # getsourcelines returns lineno with 1-offset and page() uses |
|
108 | 109 | # 0-offset, so we must adjust. |
|
109 | 110 | with stdlib_io.open(ofile, 'rb') as buffer: # Tweaked to use io.open for Python 2 |
|
110 | 111 | encoding, lines = openpy.detect_encoding(buffer.readline) |
|
111 | 112 | return encoding |
|
112 | 113 | |
|
113 | 114 | def getdoc(obj) -> Union[str,None]: |
|
114 | 115 | """Stable wrapper around inspect.getdoc. |
|
115 | 116 | |
|
116 | 117 | This can't crash because of attribute problems. |
|
117 | 118 | |
|
118 | 119 | It also attempts to call a getdoc() method on the given object. This |
|
119 | 120 | allows objects which provide their docstrings via non-standard mechanisms |
|
120 | 121 | (like Pyro proxies) to still be inspected by ipython's ? system. |
|
121 | 122 | """ |
|
122 | 123 | # Allow objects to offer customized documentation via a getdoc method: |
|
123 | 124 | try: |
|
124 | 125 | ds = obj.getdoc() |
|
125 | 126 | except Exception: |
|
126 | 127 | pass |
|
127 | 128 | else: |
|
128 | 129 | if isinstance(ds, str): |
|
129 | 130 | return inspect.cleandoc(ds) |
|
130 | 131 | docstr = inspect.getdoc(obj) |
|
131 | 132 | return docstr |
|
132 | 133 | |
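A small sketch of the getdoc() hook described above (the Proxy class is invented for the example): when an object exposes a getdoc() method returning a string, that string is cleaned and used instead of __doc__.

    from IPython.core.oinspect import getdoc

    class Proxy:
        """Static docstring that would otherwise be shown."""
        def getdoc(self):
            # e.g. fetched from a remote object at runtime
            return "   Docstring computed on demand,\n   not stored in __doc__."

    print(getdoc(Proxy()))   # the runtime docstring, cleaned via inspect.cleandoc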
|
133 | 134 | |
|
134 | 135 | def getsource(obj, oname='') -> Union[str,None]: |
|
135 | 136 | """Wrapper around inspect.getsource. |
|
136 | 137 | |
|
137 | 138 | This can be modified by other projects to provide customized source |
|
138 | 139 | extraction. |
|
139 | 140 | |
|
140 | 141 | Parameters |
|
141 | 142 | ---------- |
|
142 | 143 | obj : object |
|
143 | 144 | an object whose source code we will attempt to extract |
|
144 | 145 | oname : str |
|
145 | 146 | (optional) a name under which the object is known |
|
146 | 147 | |
|
147 | 148 | Returns |
|
148 | 149 | ------- |
|
149 | 150 | src : unicode or None |
|
150 | 151 | |
|
151 | 152 | """ |
|
152 | 153 | |
|
153 | 154 | if isinstance(obj, property): |
|
154 | 155 | sources = [] |
|
155 | 156 | for attrname in ['fget', 'fset', 'fdel']: |
|
156 | 157 | fn = getattr(obj, attrname) |
|
157 | 158 | if fn is not None: |
|
158 | 159 | encoding = get_encoding(fn) |
|
159 | 160 | oname_prefix = ('%s.' % oname) if oname else '' |
|
160 | 161 | sources.append(''.join(('# ', oname_prefix, attrname))) |
|
161 | 162 | if inspect.isfunction(fn): |
|
162 | 163 | sources.append(dedent(getsource(fn))) |
|
163 | 164 | else: |
|
164 | 165 | # Default str/repr only prints function name, |
|
165 | 166 | # pretty.pretty prints module name too. |
|
166 | 167 | sources.append( |
|
167 | 168 | '%s%s = %s\n' % (oname_prefix, attrname, pretty(fn)) |
|
168 | 169 | ) |
|
169 | 170 | if sources: |
|
170 | 171 | return '\n'.join(sources) |
|
171 | 172 | else: |
|
172 | 173 | return None |
|
173 | 174 | |
|
174 | 175 | else: |
|
175 | 176 | # Get source for non-property objects. |
|
176 | 177 | |
|
177 | 178 | obj = _get_wrapped(obj) |
|
178 | 179 | |
|
179 | 180 | try: |
|
180 | 181 | src = inspect.getsource(obj) |
|
181 | 182 | except TypeError: |
|
182 | 183 | # The object itself provided no meaningful source, try looking for |
|
183 | 184 | # its class definition instead. |
|
184 | 185 | try: |
|
185 | 186 | src = inspect.getsource(obj.__class__) |
|
186 | 187 | except (OSError, TypeError): |
|
187 | 188 | return None |
|
188 | 189 | except OSError: |
|
189 | 190 | return None |
|
190 | 191 | |
|
191 | 192 | return src |
|
192 | 193 | |
|
193 | 194 | |
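As a quick illustration of the property branch above (a sketch; the Point class is hypothetical): each defined accessor is emitted under a '# <oname>.<accessor>' header.

    from IPython.core.oinspect import getsource

    class Point:
        @property
        def x(self):
            "the x coordinate"
            return self._x

    src = getsource(Point.x, oname="Point.x")
    # src begins with "# Point.x.fget" followed by the dedented getter source;
    # fset and fdel are skipped because they are None.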
|
194 | 195 | def is_simple_callable(obj): |
|
195 | 196 | """True if obj is a plain function, method, or builtin function/method.""" |
|
196 | 197 | return (inspect.isfunction(obj) or inspect.ismethod(obj) or \ |
|
197 | 198 | isinstance(obj, _builtin_func_type) or isinstance(obj, _builtin_meth_type)) |
|
198 | 199 | |
|
199 | 200 | @undoc |
|
200 | 201 | def getargspec(obj): |
|
201 | 202 | """Wrapper around :func:`inspect.getfullargspec` |
|
202 | 203 | |
|
203 | 204 | In addition to functions and methods, this can also handle objects with a |
|
204 | 205 | ``__call__`` attribute. |
|
205 | 206 | |
|
206 | 207 | DEPRECATED: Deprecated since 7.10. Do not use, will be removed. |
|
207 | 208 | """ |
|
208 | 209 | |
|
209 | 210 | warnings.warn('`getargspec` function is deprecated as of IPython 7.10' |
|
210 | 211 | 'and will be removed in future versions.', DeprecationWarning, stacklevel=2) |
|
211 | 212 | |
|
212 | 213 | if safe_hasattr(obj, '__call__') and not is_simple_callable(obj): |
|
213 | 214 | obj = obj.__call__ |
|
214 | 215 | |
|
215 | 216 | return inspect.getfullargspec(obj) |
|
216 | 217 | |
|
217 | 218 | @undoc |
|
218 | 219 | def format_argspec(argspec): |
|
219 | 220 | """Format argspec, a convenience wrapper around inspect's formatargspec. |
|
220 | 221 | |
|
221 | 222 | This takes a dict instead of ordered arguments and calls |
|
222 | 223 | inspect.format_argspec with the arguments in the necessary order. |
|
223 | 224 | |
|
224 | 225 | DEPRECATED (since 7.10): Do not use; will be removed in future versions. |
|
225 | 226 | """ |
|
226 | 227 | |
|
227 | 228 | warnings.warn('`format_argspec` function is deprecated as of IPython 7.10' |
|
228 | 229 | 'and will be removed in future versions.', DeprecationWarning, stacklevel=2) |
|
229 | 230 | |
|
230 | 231 | |
|
231 | 232 | return inspect.formatargspec(argspec['args'], argspec['varargs'], |
|
232 | 233 | argspec['varkw'], argspec['defaults']) |
|
233 | 234 | |
|
234 | 235 | @undoc |
|
235 | 236 | def call_tip(oinfo, format_call=True): |
|
236 | 237 | """DEPRECATED since 6.0. Extract call tip data from an oinfo dict.""" |
|
237 | 238 | warnings.warn( |
|
238 | 239 | "`call_tip` function is deprecated as of IPython 6.0" |
|
239 | 240 | "and will be removed in future versions.", |
|
240 | 241 | DeprecationWarning, |
|
241 | 242 | stacklevel=2, |
|
242 | 243 | ) |
|
243 | 244 | # Get call definition |
|
244 | 245 | argspec = oinfo.get('argspec') |
|
245 | 246 | if argspec is None: |
|
246 | 247 | call_line = None |
|
247 | 248 | else: |
|
248 | 249 | # Callable objects will have 'self' as their first argument, prune |
|
249 | 250 | # it out if it's there for clarity (since users do *not* pass an |
|
250 | 251 | # extra first argument explicitly). |
|
251 | 252 | try: |
|
252 | 253 | has_self = argspec['args'][0] == 'self' |
|
253 | 254 | except (KeyError, IndexError): |
|
254 | 255 | pass |
|
255 | 256 | else: |
|
256 | 257 | if has_self: |
|
257 | 258 | argspec['args'] = argspec['args'][1:] |
|
258 | 259 | |
|
259 | 260 | call_line = oinfo['name']+format_argspec(argspec) |
|
260 | 261 | |
|
261 | 262 | # Now get docstring. |
|
262 | 263 | # The priority is: call docstring, constructor docstring, main one. |
|
263 | 264 | doc = oinfo.get('call_docstring') |
|
264 | 265 | if doc is None: |
|
265 | 266 | doc = oinfo.get('init_docstring') |
|
266 | 267 | if doc is None: |
|
267 | 268 | doc = oinfo.get('docstring','') |
|
268 | 269 | |
|
269 | 270 | return call_line, doc |
|
270 | 271 | |
|
271 | 272 | |
|
272 | 273 | def _get_wrapped(obj): |
|
273 | 274 | """Get the original object if wrapped in one or more @decorators |
|
274 | 275 | |
|
275 | 276 | Some objects automatically construct similar objects on any unrecognised |
|
276 | 277 | attribute access (e.g. unittest.mock.call). To protect against infinite loops, |
|
277 | 278 | this will arbitrarily cut off after 100 levels of obj.__wrapped__ |
|
278 | 279 | attribute access. --TK, Jan 2016 |
|
279 | 280 | """ |
|
280 | 281 | orig_obj = obj |
|
281 | 282 | i = 0 |
|
282 | 283 | while safe_hasattr(obj, '__wrapped__'): |
|
283 | 284 | obj = obj.__wrapped__ |
|
284 | 285 | i += 1 |
|
285 | 286 | if i > 100: |
|
286 | 287 | # __wrapped__ is probably a lie, so return the thing we started with |
|
287 | 288 | return orig_obj |
|
288 | 289 | return obj |
|
289 | 290 | |
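A minimal sketch (not from the diff) of the unwrapping done by _get_wrapped, relying on functools.wraps setting __wrapped__:

    import functools
    from IPython.core.oinspect import _get_wrapped

    def traced(fn):
        @functools.wraps(fn)            # sets wrapper.__wrapped__ = fn
        def wrapper(*args, **kwargs):
            return fn(*args, **kwargs)
        return wrapper

    @traced
    def greet():
        "say hello"

    assert _get_wrapped(greet) is greet.__wrapped__   # the undecorated function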
|
290 | 291 | def find_file(obj) -> str: |
|
291 | 292 | """Find the absolute path to the file where an object was defined. |
|
292 | 293 | |
|
293 | 294 | This is essentially a robust wrapper around `inspect.getabsfile`. |
|
294 | 295 | |
|
295 | 296 | Returns None if no file can be found. |
|
296 | 297 | |
|
297 | 298 | Parameters |
|
298 | 299 | ---------- |
|
299 | 300 | obj : any Python object |
|
300 | 301 | |
|
301 | 302 | Returns |
|
302 | 303 | ------- |
|
303 | 304 | fname : str |
|
304 | 305 | The absolute path to the file where the object was defined. |
|
305 | 306 | """ |
|
306 | 307 | obj = _get_wrapped(obj) |
|
307 | 308 | |
|
308 | 309 | fname = None |
|
309 | 310 | try: |
|
310 | 311 | fname = inspect.getabsfile(obj) |
|
311 | 312 | except TypeError: |
|
312 | 313 | # For an instance, the file that matters is where its class was |
|
313 | 314 | # declared. |
|
314 | 315 | try: |
|
315 | 316 | fname = inspect.getabsfile(obj.__class__) |
|
316 | 317 | except (OSError, TypeError): |
|
317 | 318 | # Can happen for builtins |
|
318 | 319 | pass |
|
319 | 320 | except OSError: |
|
320 | 321 | pass |
|
321 | 322 | |
|
322 | 323 | return cast_unicode(fname) |
|
323 | 324 | |
|
324 | 325 | |
|
325 | 326 | def find_source_lines(obj): |
|
326 | 327 | """Find the line number in a file where an object was defined. |
|
327 | 328 | |
|
328 | 329 | This is essentially a robust wrapper around `inspect.getsourcelines`. |
|
329 | 330 | |
|
330 | 331 | Returns None if no file can be found. |
|
331 | 332 | |
|
332 | 333 | Parameters |
|
333 | 334 | ---------- |
|
334 | 335 | obj : any Python object |
|
335 | 336 | |
|
336 | 337 | Returns |
|
337 | 338 | ------- |
|
338 | 339 | lineno : int |
|
339 | 340 | The line number where the object definition starts. |
|
340 | 341 | """ |
|
341 | 342 | obj = _get_wrapped(obj) |
|
342 | 343 | |
|
343 | 344 | try: |
|
344 | 345 | lineno = inspect.getsourcelines(obj)[1] |
|
345 | 346 | except TypeError: |
|
346 | 347 | # For instances, try the class object like getsource() does |
|
347 | 348 | try: |
|
348 | 349 | lineno = inspect.getsourcelines(obj.__class__)[1] |
|
349 | 350 | except (OSError, TypeError): |
|
350 | 351 | return None |
|
351 | 352 | except OSError: |
|
352 | 353 | return None |
|
353 | 354 | |
|
354 | 355 | return lineno |
|
355 | 356 | |
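A short usage sketch (not from the diff) tying the two helpers together, much as pfile() below does:

    import textwrap
    from IPython.core.oinspect import find_file, find_source_lines

    fname = find_file(textwrap.TextWrapper)           # absolute path to .../textwrap.py
    lineno = find_source_lines(textwrap.TextWrapper)  # 1-based line of "class TextWrapper"
    # pfile() pages fname starting at lineno - 1, since the pager is 0-offset.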
|
356 | 357 | class Inspector(Colorable): |
|
357 | 358 | |
|
358 | 359 | def __init__(self, color_table=InspectColors, |
|
359 | 360 | code_color_table=PyColorize.ANSICodeColors, |
|
360 | 361 | scheme=None, |
|
361 | 362 | str_detail_level=0, |
|
362 | 363 | parent=None, config=None): |
|
363 | 364 | super(Inspector, self).__init__(parent=parent, config=config) |
|
364 | 365 | self.color_table = color_table |
|
365 | 366 | self.parser = PyColorize.Parser(out='str', parent=self, style=scheme) |
|
366 | 367 | self.format = self.parser.format |
|
367 | 368 | self.str_detail_level = str_detail_level |
|
368 | 369 | self.set_active_scheme(scheme) |
|
369 | 370 | |
|
370 | 371 | def _getdef(self,obj,oname='') -> Union[str,None]: |
|
371 | 372 | """Return the call signature for any callable object. |
|
372 | 373 | |
|
373 | 374 | If any exception is generated, None is returned instead and the |
|
374 | 375 | exception is suppressed.""" |
|
375 | 376 | try: |
|
376 | 377 | return _render_signature(signature(obj), oname) |
|
377 | 378 | except: |
|
378 | 379 | return None |
|
379 | 380 | |
|
380 | 381 | def __head(self,h) -> str: |
|
381 | 382 | """Return a header string with proper colors.""" |
|
382 | 383 | return '%s%s%s' % (self.color_table.active_colors.header,h, |
|
383 | 384 | self.color_table.active_colors.normal) |
|
384 | 385 | |
|
385 | 386 | def set_active_scheme(self, scheme): |
|
386 | 387 | if scheme is not None: |
|
387 | 388 | self.color_table.set_active_scheme(scheme) |
|
388 | 389 | self.parser.color_table.set_active_scheme(scheme) |
|
389 | 390 | |
|
390 | 391 | def noinfo(self, msg, oname): |
|
391 | 392 | """Generic message when no information is found.""" |
|
392 | 393 | print('No %s found' % msg, end=' ') |
|
393 | 394 | if oname: |
|
394 | 395 | print('for %s' % oname) |
|
395 | 396 | else: |
|
396 | 397 | print() |
|
397 | 398 | |
|
398 | 399 | def pdef(self, obj, oname=''): |
|
399 | 400 | """Print the call signature for any callable object. |
|
400 | 401 | |
|
401 | 402 | If the object is a class, print the constructor information.""" |
|
402 | 403 | |
|
403 | 404 | if not callable(obj): |
|
404 | 405 | print('Object is not callable.') |
|
405 | 406 | return |
|
406 | 407 | |
|
407 | 408 | header = '' |
|
408 | 409 | |
|
409 | 410 | if inspect.isclass(obj): |
|
410 | 411 | header = self.__head('Class constructor information:\n') |
|
411 | 412 | |
|
412 | 413 | |
|
413 | 414 | output = self._getdef(obj,oname) |
|
414 | 415 | if output is None: |
|
415 | 416 | self.noinfo('definition header',oname) |
|
416 | 417 | else: |
|
417 | 418 | print(header,self.format(output), end=' ') |
|
418 | 419 | |
|
419 | 420 | # In Python 3, all classes are new-style, so they all have __init__. |
|
420 | 421 | @skip_doctest |
|
421 | 422 | def pdoc(self, obj, oname='', formatter=None): |
|
422 | 423 | """Print the docstring for any object. |
|
423 | 424 | |
|
424 | 425 | Optional: |
|
425 | 426 | -formatter: a function to run the docstring through for specially |
|
426 | 427 | formatted docstrings. |
|
427 | 428 | |
|
428 | 429 | Examples |
|
429 | 430 | -------- |
|
430 | 431 | In [1]: class NoInit: |
|
431 | 432 | ...: pass |
|
432 | 433 | |
|
433 | 434 | In [2]: class NoDoc: |
|
434 | 435 | ...: def __init__(self): |
|
435 | 436 | ...: pass |
|
436 | 437 | |
|
437 | 438 | In [3]: %pdoc NoDoc |
|
438 | 439 | No documentation found for NoDoc |
|
439 | 440 | |
|
440 | 441 | In [4]: %pdoc NoInit |
|
441 | 442 | No documentation found for NoInit |
|
442 | 443 | |
|
443 | 444 | In [5]: obj = NoInit() |
|
444 | 445 | |
|
445 | 446 | In [6]: %pdoc obj |
|
446 | 447 | No documentation found for obj |
|
447 | 448 | |
|
448 | 449 | In [5]: obj2 = NoDoc() |
|
449 | 450 | |
|
450 | 451 | In [6]: %pdoc obj2 |
|
451 | 452 | No documentation found for obj2 |
|
452 | 453 | """ |
|
453 | 454 | |
|
454 | 455 | head = self.__head # For convenience |
|
455 | 456 | lines = [] |
|
456 | 457 | ds = getdoc(obj) |
|
457 | 458 | if formatter: |
|
458 | 459 | ds = formatter(ds).get('plain/text', ds) |
|
459 | 460 | if ds: |
|
460 | 461 | lines.append(head("Class docstring:")) |
|
461 | 462 | lines.append(indent(ds)) |
|
462 | 463 | if inspect.isclass(obj) and hasattr(obj, '__init__'): |
|
463 | 464 | init_ds = getdoc(obj.__init__) |
|
464 | 465 | if init_ds is not None: |
|
465 | 466 | lines.append(head("Init docstring:")) |
|
466 | 467 | lines.append(indent(init_ds)) |
|
467 | 468 | elif hasattr(obj,'__call__'): |
|
468 | 469 | call_ds = getdoc(obj.__call__) |
|
469 | 470 | if call_ds: |
|
470 | 471 | lines.append(head("Call docstring:")) |
|
471 | 472 | lines.append(indent(call_ds)) |
|
472 | 473 | |
|
473 | 474 | if not lines: |
|
474 | 475 | self.noinfo('documentation',oname) |
|
475 | 476 | else: |
|
476 | 477 | page.page('\n'.join(lines)) |
|
477 | 478 | |
|
478 | 479 | def psource(self, obj, oname=''): |
|
479 | 480 | """Print the source code for an object.""" |
|
480 | 481 | |
|
481 | 482 | # Flush the source cache because inspect can return out-of-date source |
|
482 | 483 | linecache.checkcache() |
|
483 | 484 | try: |
|
484 | 485 | src = getsource(obj, oname=oname) |
|
485 | 486 | except Exception: |
|
486 | 487 | src = None |
|
487 | 488 | |
|
488 | 489 | if src is None: |
|
489 | 490 | self.noinfo('source', oname) |
|
490 | 491 | else: |
|
491 | 492 | page.page(self.format(src)) |
|
492 | 493 | |
|
493 | 494 | def pfile(self, obj, oname=''): |
|
494 | 495 | """Show the whole file where an object was defined.""" |
|
495 | 496 | |
|
496 | 497 | lineno = find_source_lines(obj) |
|
497 | 498 | if lineno is None: |
|
498 | 499 | self.noinfo('file', oname) |
|
499 | 500 | return |
|
500 | 501 | |
|
501 | 502 | ofile = find_file(obj) |
|
502 | 503 | # run contents of file through pager starting at line where the object |
|
503 | 504 | # is defined, as long as the file isn't binary and is actually on the |
|
504 | 505 | # filesystem. |
|
505 | 506 | if ofile.endswith(('.so', '.dll', '.pyd')): |
|
506 | 507 | print('File %r is binary, not printing.' % ofile) |
|
507 | 508 | elif not os.path.isfile(ofile): |
|
508 | 509 | print('File %r does not exist, not printing.' % ofile) |
|
509 | 510 | else: |
|
510 | 511 | # Print only text files, not extension binaries. Note that |
|
511 | 512 | # getsourcelines returns lineno with 1-offset and page() uses |
|
512 | 513 | # 0-offset, so we must adjust. |
|
513 | 514 | page.page(self.format(openpy.read_py_file(ofile, skip_encoding_cookie=False)), lineno - 1) |
|
514 | 515 | |
|
515 | 516 | |
|
516 | 517 | def _mime_format(self, text:str, formatter=None) -> dict: |
|
517 | 518 | """Return a mime bundle representation of the input text. |
|
518 | 519 | |
|
519 | 520 | - if `formatter` is None, the returned mime bundle has |
|
520 | 521 | a ``text/plain`` field, with the input text. |
|
521 | 522 | a ``text/html`` field with a ``<pre>`` tag containing the input text. |
|
522 | 523 | |
|
523 | 524 | - if ``formatter`` is not None, it must be a callable transforming the |
|
524 | 525 | input text into a mime bundle. Default values for ``text/plain`` and |
|
525 | 526 | ``text/html`` representations are the ones described above. |
|
526 | 527 | |
|
527 | 528 | Note: |
|
528 | 529 | |
|
529 | 530 | Formatters returning strings are supported but this behavior is deprecated. |
|
530 | 531 | |
|
531 | 532 | """ |
|
532 | 533 | defaults = { |
|
533 |
|
|
|
534 |
|
|
|
534 | "text/plain": text, | |
|
535 | "text/html": f"<pre>{html.escape(text)}</pre>", | |
|
535 | 536 | } |
|
536 | 537 | |
|
537 | 538 | if formatter is None: |
|
538 | 539 | return defaults |
|
539 | 540 | else: |
|
540 | 541 | formatted = formatter(text) |
|
541 | 542 | |
|
542 | 543 | if not isinstance(formatted, dict): |
|
543 | 544 | # Handle the deprecated behavior of a formatter returning |
|
544 | 545 | # a string instead of a mime bundle. |
|
545 | return { | |
|
546 | 'text/plain': formatted, | |
|
547 | 'text/html': '<pre>' + formatted + '</pre>' | |
|
548 | } | |
|
546 | return {"text/plain": formatted, "text/html": f"<pre>{formatted}</pre>"} | |
|
549 | 547 | |
|
550 | 548 | else: |
|
551 | 549 | return dict(defaults, **formatted) |
|
552 | 550 | |
|
553 | 551 | |
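For illustration only (this example is not part of the change): a formatter passed to _mime_format may return a partial mime bundle; any key it omits falls back to the defaults built from the raw text, because the two dicts are merged with dict(defaults, **formatted).

    from IPython.core.oinspect import Inspector

    def shouting_formatter(text):
        # the non-deprecated protocol: return a mime bundle, not a bare string
        return {"text/plain": text.upper()}

    insp = Inspector()
    bundle = insp._mime_format("some docstring", formatter=shouting_formatter)
    # bundle["text/plain"] == "SOME DOCSTRING"
    # bundle["text/html"]  == "<pre>some docstring</pre>"   (default fallback)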
|
554 | 552 | def format_mime(self, bundle): |
|
553 | """Format a mimebundle being created by _make_info_unformatted into a real mimebundle""" | |
|
554 | # Format text/plain mimetype | |
|
555 | if isinstance(bundle["text/plain"], (list, tuple)): | |
|
556 | # bundle['text/plain'] is a list of (head, formatted body) pairs | |
|
557 | lines = [] | |
|
558 | _len = max(len(h) for h, _ in bundle["text/plain"]) | |
|
555 | 559 | |
|
556 |
|
|
|
557 | ||
|
558 | text = '' | |
|
559 | heads, bodies = list(zip(*text_plain)) | |
|
560 | _len = max(len(h) for h in heads) | |
|
560 | for head, body in bundle["text/plain"]: | |
|
561 | body = body.strip("\n") | |
|
562 | delim = "\n" if "\n" in body else " " | |
|
563 | lines.append( | |
|
564 | f"{self.__head(head+':')}{(_len - len(head))*' '}{delim}{body}" | |
|
565 | ) | |
|
561 | 566 | |
|
562 | for head, body in zip(heads, bodies): | |
|
563 | body = body.strip('\n') | |
|
564 | delim = '\n' if '\n' in body else ' ' | |
|
565 | text += self.__head(head+':') + (_len - len(head))*' ' +delim + body +'\n' | |
|
567 | bundle["text/plain"] = "\n".join(lines) | |
|
566 | 568 | |
|
567 | bundle['text/plain'] = text | |
|
569 | # Format the text/html mimetype | |
|
570 | if isinstance(bundle["text/html"], (list, tuple)): | |
|
571 | # bundle['text/html'] is a list of (head, formatted body) pairs | |
|
572 | bundle["text/html"] = "\n".join( | |
|
573 | (f"<h1>{head}</h1>\n{body}" for (head, body) in bundle["text/html"]) | |
|
574 | ) | |
|
568 | 575 | return bundle |
|
569 | 576 | |
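To make the new intermediate representation concrete (values below are illustrative, not taken from the diff): _make_info_unformatted collects (title, body) pairs per mimetype, and format_mime then collapses each list into a single string.

    unformatted = {
        "text/plain": [("Type", "int"), ("Docstring", "int([x]) -> integer")],
        "text/html": [("Type", "<pre>int</pre>")],
    }
    # After Inspector.format_mime(unformatted):
    #   "text/plain" becomes one aligned, colorized string, one section per title;
    #   "text/html" becomes "<h1>Type</h1>\n<pre>int</pre>".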
|
570 | def _get_info( |
|
|
571 | self, obj, oname="", formatter=None, info=None, detail_level=0, omit_sections=() | |
|
577 | def _append_info_field( | |
|
578 | self, bundle, title: str, key: str, info, omit_sections, formatter | |
|
572 | 579 | ): |
|
573 | """Retrieve an info dict and format it. | |
|
574 | ||
|
575 | Parameters | |
|
576 | ---------- | |
|
577 | obj : any | |
|
578 | Object to inspect and return info from | |
|
579 | oname : str (default: ''): | |
|
580 | Name of the variable pointing to `obj`. | |
|
581 | formatter : callable | |
|
582 | info | |
|
583 | already computed information | |
|
584 | detail_level : integer | |
|
585 | Granularity of detail level, if set to 1, give more information. | |
|
586 | omit_sections : container[str] | |
|
587 | Titles or keys to omit from output (can be set, tuple, etc., anything supporting `in`) | |
|
588 | """ | |
|
589 | ||
|
590 | info = self.info(obj, oname=oname, info=info, detail_level=detail_level) | |
|
591 | ||
|
592 | _mime = { | |
|
593 | 'text/plain': [], | |
|
594 | 'text/html': '', | |
|
595 | } | |
|
596 | ||
|
597 | def append_field(bundle, title:str, key:str, formatter=None): | |
|
580 | """Append an info value to the unformatted mimebundle being constructed by _make_info_unformatted""" | |
|
598 | 581 |
|
|
599 | 582 |
|
|
600 | 583 |
|
|
601 | 584 |
|
|
602 | 585 |
|
|
603 |
|
|
|
604 |
|
|
|
586 | bundle["text/plain"].append((title, formatted_field["text/plain"])) | |
|
587 | bundle["text/html"].append((title, formatted_field["text/html"])) | |
|
588 | ||
|
589 | def _make_info_unformatted(self, obj, info, formatter, detail_level, omit_sections): | |
|
590 | """Assemble the mimebundle as unformatted lists of information""" | |
|
591 | bundle = { | |
|
592 | "text/plain": [], | |
|
593 | "text/html": [], | |
|
594 | } | |
|
595 | ||
|
596 | # A convenience function to simplify calls below | |
|
597 | def append_field(bundle, title: str, key: str, formatter=None): | |
|
598 | self._append_info_field( | |
|
599 | bundle, | |
|
600 | title=title, | |
|
601 | key=key, | |
|
602 | info=info, | |
|
603 | omit_sections=omit_sections, | |
|
604 | formatter=formatter, | |
|
605 | ) | |
|
605 | 606 | |
|
606 | 607 | def code_formatter(text): |
|
607 | 608 | return { |
|
608 | 609 | 'text/plain': self.format(text), |
|
609 | 610 | 'text/html': pylight(text) |
|
610 | 611 | } |
|
611 | 612 | |
|
612 |
if info[ |
|
|
613 |
append_field( |
|
|
613 | if info["isalias"]: | |
|
614 | append_field(bundle, "Repr", "string_form") | |
|
614 | 615 | |
|
615 | 616 | elif info['ismagic']: |
|
616 | 617 | if detail_level > 0: |
|
617 |
append_field( |
|
|
618 | append_field(bundle, "Source", "source", code_formatter) | |
|
618 | 619 | else: |
|
619 |
append_field( |
|
|
620 |
append_field( |
|
|
620 | append_field(bundle, "Docstring", "docstring", formatter) | |
|
621 | append_field(bundle, "File", "file") | |
|
621 | 622 | |
|
622 | 623 | elif info['isclass'] or is_simple_callable(obj): |
|
623 | 624 | # Functions, methods, classes |
|
624 |
append_field( |
|
|
625 |
append_field( |
|
|
626 |
append_field( |
|
|
627 |
if detail_level > 0 and info[ |
|
|
628 |
append_field( |
|
|
625 | append_field(bundle, "Signature", "definition", code_formatter) | |
|
626 | append_field(bundle, "Init signature", "init_definition", code_formatter) | |
|
627 | append_field(bundle, "Docstring", "docstring", formatter) | |
|
628 | if detail_level > 0 and info["source"]: | |
|
629 | append_field(bundle, "Source", "source", code_formatter) | |
|
629 | 630 | else: |
|
630 |
append_field( |
|
|
631 | append_field(bundle, "Init docstring", "init_docstring", formatter) | |
|
631 | 632 | |
|
632 |
append_field( |
|
|
633 |
append_field( |
|
|
634 |
append_field( |
|
|
633 | append_field(bundle, "File", "file") | |
|
634 | append_field(bundle, "Type", "type_name") | |
|
635 | append_field(bundle, "Subclasses", "subclasses") | |
|
635 | 636 | |
|
636 | 637 | else: |
|
637 | 638 | # General Python objects |
|
638 |
append_field( |
|
|
639 |
append_field( |
|
|
640 |
append_field( |
|
|
641 |
append_field( |
|
|
639 | append_field(bundle, "Signature", "definition", code_formatter) | |
|
640 | append_field(bundle, "Call signature", "call_def", code_formatter) | |
|
641 | append_field(bundle, "Type", "type_name") | |
|
642 | append_field(bundle, "String form", "string_form") | |
|
642 | 643 | |
|
643 | 644 | # Namespace |
|
644 |
if info[ |
|
|
645 |
append_field( |
|
|
645 | if info["namespace"] != "Interactive": | |
|
646 | append_field(bundle, "Namespace", "namespace") | |
|
646 | 647 | |
|
647 |
append_field( |
|
|
648 |
append_field( |
|
|
648 | append_field(bundle, "Length", "length") | |
|
649 | append_field(bundle, "File", "file") | |
|
649 | 650 | |
|
650 | 651 | # Source or docstring, depending on detail level and whether |
|
651 | 652 | # source found. |
|
652 |
if detail_level > 0 and info[ |
|
|
653 |
append_field( |
|
|
653 | if detail_level > 0 and info["source"]: | |
|
654 | append_field(bundle, "Source", "source", code_formatter) | |
|
654 | 655 | else: |
|
655 |
append_field( |
|
|
656 | append_field(bundle, "Docstring", "docstring", formatter) | |
|
657 | ||
|
658 | append_field(bundle, "Class docstring", "class_docstring", formatter) | |
|
659 | append_field(bundle, "Init docstring", "init_docstring", formatter) | |
|
660 | append_field(bundle, "Call docstring", "call_docstring", formatter) | |
|
661 | return bundle | |
|
656 | 662 | |
|
657 | append_field(_mime, 'Class docstring', 'class_docstring', formatter) | |
|
658 | append_field(_mime, 'Init docstring', 'init_docstring', formatter) | |
|
659 | append_field(_mime, 'Call docstring', 'call_docstring', formatter) | |
|
660 | 663 | |
|
664 | def _get_info( | |
|
665 | self, obj, oname="", formatter=None, info=None, detail_level=0, omit_sections=() | |
|
666 | ): | |
|
667 | """Retrieve an info dict and format it. | |
|
668 | ||
|
669 | Parameters | |
|
670 | ---------- | |
|
671 | obj : any | |
|
672 | Object to inspect and return info from | |
|
673 | oname : str (default: ''): | |
|
674 | Name of the variable pointing to `obj`. | |
|
675 | formatter : callable | |
|
676 | info | |
|
677 | already computed information | |
|
678 | detail_level : integer | |
|
679 | Granularity of detail level, if set to 1, give more information. | |
|
680 | omit_sections : container[str] | |
|
681 | Titles or keys to omit from output (can be set, tuple, etc., anything supporting `in`) | |
|
682 | """ | |
|
661 | 683 | |
|
662 | return self.format_mime(_mime) | |
|
684 | info = self.info(obj, oname=oname, info=info, detail_level=detail_level) | |
|
685 | bundle = self._make_info_unformatted( | |
|
686 | obj, info, formatter, detail_level=detail_level, omit_sections=omit_sections | |
|
687 | ) | |
|
688 | return self.format_mime(bundle) | |
|
663 | 689 | |
|
664 | 690 | def pinfo( |
|
665 | 691 | self, |
|
666 | 692 | obj, |
|
667 | 693 | oname="", |
|
668 | 694 | formatter=None, |
|
669 | 695 | info=None, |
|
670 | 696 | detail_level=0, |
|
671 | 697 | enable_html_pager=True, |
|
672 | 698 | omit_sections=(), |
|
673 | 699 | ): |
|
674 | 700 | """Show detailed information about an object. |
|
675 | 701 | |
|
676 | 702 | Optional arguments: |
|
677 | 703 | |
|
678 | 704 | - oname: name of the variable pointing to the object. |
|
679 | 705 | |
|
680 | 706 | - formatter: callable (optional) |
|
681 | 707 | A special formatter for docstrings. |
|
682 | 708 | |
|
683 | 709 | The formatter is a callable that takes a string as an input |
|
684 | 710 | and returns either a formatted string or a mime type bundle |
|
685 | 711 | in the form of a dictionary. |
|
686 | 712 | |
|
687 | 713 | Although the support of custom formatter returning a string |
|
688 | 714 | instead of a mime type bundle is deprecated. |
|
689 | 715 | |
|
690 | 716 | - info: a structure with some information fields which may have been |
|
691 | 717 | precomputed already. |
|
692 | 718 | |
|
693 | 719 | - detail_level: if set to 1, more information is given. |
|
694 | 720 | |
|
695 | 721 | - omit_sections: set of section keys and titles to omit |
|
696 | 722 | """ |
|
697 | 723 | info = self._get_info( |
|
698 | 724 | obj, oname, formatter, info, detail_level, omit_sections=omit_sections |
|
699 | 725 | ) |
|
700 | 726 | if not enable_html_pager: |
|
701 | 727 | del info['text/html'] |
|
702 | 728 | page.page(info) |
|
703 | 729 | |
|
704 | 730 | def _info(self, obj, oname="", info=None, detail_level=0): |
|
705 | 731 | """ |
|
706 | 732 | Inspector.info() was likely improperly marked as deprecated |
|
707 | 733 | while only a parameter was deprecated. We "un-deprecate" it. |
|
708 | 734 | """ |
|
709 | 735 | |
|
710 | 736 | warnings.warn( |
|
711 | 737 | "The `Inspector.info()` method has been un-deprecated as of 8.0 " |
|
712 | 738 | "and the `formatter=` keyword removed. `Inspector._info` is now " |
|
713 | 739 | "an alias, and you can just call `.info()` directly.", |
|
714 | 740 | DeprecationWarning, |
|
715 | 741 | stacklevel=2, |
|
716 | 742 | ) |
|
717 | 743 | return self.info(obj, oname=oname, info=info, detail_level=detail_level) |
|
718 | 744 | |
|
719 | 745 | def info(self, obj, oname="", info=None, detail_level=0) -> dict: |
|
720 | 746 | """Compute a dict with detailed information about an object. |
|
721 | 747 | |
|
722 | 748 | Parameters |
|
723 | 749 | ---------- |
|
724 | 750 | obj : any |
|
725 | 751 | An object to find information about |
|
726 | 752 | oname : str (default: '') |
|
727 | 753 | Name of the variable pointing to `obj`. |
|
728 | 754 | info : (default: None) |
|
729 | 755 | A struct (dict like with attr access) with some information fields |
|
730 | 756 | which may have been precomputed already. |
|
731 | 757 | detail_level : int (default:0) |
|
732 | 758 | If set to 1, more information is given. |
|
733 | 759 | |
|
734 | 760 | Returns |
|
735 | 761 | ------- |
|
736 | 762 | An object info dict with known fields from `info_fields`. Keys are |
|
737 | 763 | strings, values are string or None. |
|
738 | 764 | """ |
|
739 | 765 | |
|
740 | 766 | if info is None: |
|
741 | 767 | ismagic = False |
|
742 | 768 | isalias = False |
|
743 | 769 | ospace = '' |
|
744 | 770 | else: |
|
745 | 771 | ismagic = info.ismagic |
|
746 | 772 | isalias = info.isalias |
|
747 | 773 | ospace = info.namespace |
|
748 | 774 | |
|
749 | 775 | # Get docstring, special-casing aliases: |
|
750 | 776 | if isalias: |
|
751 | 777 | if not callable(obj): |
|
752 | 778 | try: |
|
753 | 779 | ds = "Alias to the system command:\n %s" % obj[1] |
|
754 | 780 | except: |
|
755 | 781 | ds = "Alias: " + str(obj) |
|
756 | 782 | else: |
|
757 | 783 | ds = "Alias to " + str(obj) |
|
758 | 784 | if obj.__doc__: |
|
759 | 785 | ds += "\nDocstring:\n" + obj.__doc__ |
|
760 | 786 | else: |
|
761 | 787 | ds = getdoc(obj) |
|
762 | 788 | if ds is None: |
|
763 | 789 | ds = '<no docstring>' |
|
764 | 790 | |
|
765 | 791 | # store output in a dict, we initialize it here and fill it as we go |
|
766 | 792 | out = dict(name=oname, found=True, isalias=isalias, ismagic=ismagic, subclasses=None) |
|
767 | 793 | |
|
768 | 794 | string_max = 200 # max size of strings to show (snipped if longer) |
|
769 | 795 | shalf = int((string_max - 5) / 2) |
|
770 | 796 | |
|
771 | 797 | if ismagic: |
|
772 | 798 | out['type_name'] = 'Magic function' |
|
773 | 799 | elif isalias: |
|
774 | 800 | out['type_name'] = 'System alias' |
|
775 | 801 | else: |
|
776 | 802 | out['type_name'] = type(obj).__name__ |
|
777 | 803 | |
|
778 | 804 | try: |
|
779 | 805 | bclass = obj.__class__ |
|
780 | 806 | out['base_class'] = str(bclass) |
|
781 | 807 | except: |
|
782 | 808 | pass |
|
783 | 809 | |
|
784 | 810 | # String form, but snip if too long in ? form (full in ??) |
|
785 | 811 | if detail_level >= self.str_detail_level: |
|
786 | 812 | try: |
|
787 | 813 | ostr = str(obj) |
|
788 | 814 | str_head = 'string_form' |
|
789 | 815 | if not detail_level and len(ostr)>string_max: |
|
790 | 816 | ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:] |
|
791 | 817 | ostr = ("\n" + " " * len(str_head.expandtabs())).\ |
|
792 | 818 | join(q.strip() for q in ostr.split("\n")) |
|
793 | 819 | out[str_head] = ostr |
|
794 | 820 | except: |
|
795 | 821 | pass |
|
796 | 822 | |
|
797 | 823 | if ospace: |
|
798 | 824 | out['namespace'] = ospace |
|
799 | 825 | |
|
800 | 826 | # Length (for strings and lists) |
|
801 | 827 | try: |
|
802 | 828 | out['length'] = str(len(obj)) |
|
803 | 829 | except Exception: |
|
804 | 830 | pass |
|
805 | 831 | |
|
806 | 832 | # Filename where object was defined |
|
807 | 833 | binary_file = False |
|
808 | 834 | fname = find_file(obj) |
|
809 | 835 | if fname is None: |
|
810 | 836 | # if anything goes wrong, we don't want to show source, so it's as |
|
811 | 837 | # if the file was binary |
|
812 | 838 | binary_file = True |
|
813 | 839 | else: |
|
814 | 840 | if fname.endswith(('.so', '.dll', '.pyd')): |
|
815 | 841 | binary_file = True |
|
816 | 842 | elif fname.endswith('<string>'): |
|
817 | 843 | fname = 'Dynamically generated function. No source code available.' |
|
818 | 844 | out['file'] = compress_user(fname) |
|
819 | 845 | |
|
820 | 846 | # Original source code for a callable, class or property. |
|
821 | 847 | if detail_level: |
|
822 | 848 | # Flush the source cache because inspect can return out-of-date |
|
823 | 849 | # source |
|
824 | 850 | linecache.checkcache() |
|
825 | 851 | try: |
|
826 | 852 | if isinstance(obj, property) or not binary_file: |
|
827 | 853 | src = getsource(obj, oname) |
|
828 | 854 | if src is not None: |
|
829 | 855 | src = src.rstrip() |
|
830 | 856 | out['source'] = src |
|
831 | 857 | |
|
832 | 858 | except Exception: |
|
833 | 859 | pass |
|
834 | 860 | |
|
835 | 861 | # Add docstring only if no source is to be shown (avoid repetitions). |
|
836 | 862 | if ds and not self._source_contains_docstring(out.get('source'), ds): |
|
837 | 863 | out['docstring'] = ds |
|
838 | 864 | |
|
839 | 865 | # Constructor docstring for classes |
|
840 | 866 | if inspect.isclass(obj): |
|
841 | 867 | out['isclass'] = True |
|
842 | 868 | |
|
843 | 869 | # get the init signature: |
|
844 | 870 | try: |
|
845 | 871 | init_def = self._getdef(obj, oname) |
|
846 | 872 | except AttributeError: |
|
847 | 873 | init_def = None |
|
848 | 874 | |
|
849 | 875 | # get the __init__ docstring |
|
850 | 876 | try: |
|
851 | 877 | obj_init = obj.__init__ |
|
852 | 878 | except AttributeError: |
|
853 | 879 | init_ds = None |
|
854 | 880 | else: |
|
855 | 881 | if init_def is None: |
|
856 | 882 | # Get signature from init if top-level sig failed. |
|
857 | 883 | # Can happen for built-in types (list, etc.). |
|
858 | 884 | try: |
|
859 | 885 | init_def = self._getdef(obj_init, oname) |
|
860 | 886 | except AttributeError: |
|
861 | 887 | pass |
|
862 | 888 | init_ds = getdoc(obj_init) |
|
863 | 889 | # Skip Python's auto-generated docstrings |
|
864 | 890 | if init_ds == _object_init_docstring: |
|
865 | 891 | init_ds = None |
|
866 | 892 | |
|
867 | 893 | if init_def: |
|
868 | 894 | out['init_definition'] = init_def |
|
869 | 895 | |
|
870 | 896 | if init_ds: |
|
871 | 897 | out['init_docstring'] = init_ds |
|
872 | 898 | |
|
873 | 899 | names = [sub.__name__ for sub in type.__subclasses__(obj)] |
|
874 | 900 | if len(names) < 10: |
|
875 | 901 | all_names = ', '.join(names) |
|
876 | 902 | else: |
|
877 | 903 | all_names = ', '.join(names[:10]+['...']) |
|
878 | 904 | out['subclasses'] = all_names |
|
879 | 905 | # and class docstring for instances: |
|
880 | 906 | else: |
|
881 | 907 | # reconstruct the function definition and print it: |
|
882 | 908 | defln = self._getdef(obj, oname) |
|
883 | 909 | if defln: |
|
884 | 910 | out['definition'] = defln |
|
885 | 911 | |
|
886 | 912 | # First, check whether the instance docstring is identical to the |
|
887 | 913 | # class one, and print it separately if they don't coincide. In |
|
888 | 914 | # most cases they will, but it's nice to print all the info for |
|
889 | 915 | # objects which use instance-customized docstrings. |
|
890 | 916 | if ds: |
|
891 | 917 | try: |
|
892 | 918 | cls = getattr(obj,'__class__') |
|
893 | 919 | except: |
|
894 | 920 | class_ds = None |
|
895 | 921 | else: |
|
896 | 922 | class_ds = getdoc(cls) |
|
897 | 923 | # Skip Python's auto-generated docstrings |
|
898 | 924 | if class_ds in _builtin_type_docstrings: |
|
899 | 925 | class_ds = None |
|
900 | 926 | if class_ds and ds != class_ds: |
|
901 | 927 | out['class_docstring'] = class_ds |
|
902 | 928 | |
|
903 | 929 | # Next, try to show constructor docstrings |
|
904 | 930 | try: |
|
905 | 931 | init_ds = getdoc(obj.__init__) |
|
906 | 932 | # Skip Python's auto-generated docstrings |
|
907 | 933 | if init_ds == _object_init_docstring: |
|
908 | 934 | init_ds = None |
|
909 | 935 | except AttributeError: |
|
910 | 936 | init_ds = None |
|
911 | 937 | if init_ds: |
|
912 | 938 | out['init_docstring'] = init_ds |
|
913 | 939 | |
|
914 | 940 | # Call form docstring for callable instances |
|
915 | 941 | if safe_hasattr(obj, '__call__') and not is_simple_callable(obj): |
|
916 | 942 | call_def = self._getdef(obj.__call__, oname) |
|
917 | 943 | if call_def and (call_def != out.get('definition')): |
|
918 | 944 | # it may never be the case that call def and definition differ, |
|
919 | 945 | # but don't include the same signature twice |
|
920 | 946 | out['call_def'] = call_def |
|
921 | 947 | call_ds = getdoc(obj.__call__) |
|
922 | 948 | # Skip Python's auto-generated docstrings |
|
923 | 949 | if call_ds == _func_call_docstring: |
|
924 | 950 | call_ds = None |
|
925 | 951 | if call_ds: |
|
926 | 952 | out['call_docstring'] = call_ds |
|
927 | 953 | |
|
928 | 954 | return object_info(**out) |
|
929 | 955 | |
|
930 | 956 | @staticmethod |
|
931 | 957 | def _source_contains_docstring(src, doc): |
|
932 | 958 | """ |
|
933 | 959 | Check whether the source *src* contains the docstring *doc*. |
|
934 | 960 | |
|
935 | 961 | This is a helper function to skip displaying the docstring if the |
|
936 | 962 | source already contains it, avoiding repetition of information. |
|
937 | 963 | """ |
|
938 | 964 | try: |
|
939 | 965 | def_node, = ast.parse(dedent(src)).body |
|
940 | 966 | return ast.get_docstring(def_node) == doc |
|
941 | 967 | except Exception: |
|
942 | 968 | # The source can become invalid or even non-existent (because it |
|
943 | 969 | # is re-fetched from the source file) so the above code fail in |
|
944 | 970 | # arbitrary ways. |
|
945 | 971 | return False |
|
946 | 972 | |
|
947 | 973 | def psearch(self,pattern,ns_table,ns_search=[], |
|
948 | 974 | ignore_case=False,show_all=False, *, list_types=False): |
|
949 | 975 | """Search namespaces with wildcards for objects. |
|
950 | 976 | |
|
951 | 977 | Arguments: |
|
952 | 978 | |
|
953 | 979 | - pattern: string containing shell-like wildcards to use in namespace |
|
954 | 980 | searches and optionally a type specification to narrow the search to |
|
955 | 981 | objects of that type. |
|
956 | 982 | |
|
957 | 983 | - ns_table: dict of name->namespaces for search. |
|
958 | 984 | |
|
959 | 985 | Optional arguments: |
|
960 | 986 | |
|
961 | 987 | - ns_search: list of namespace names to include in search. |
|
962 | 988 | |
|
963 | 989 | - ignore_case(False): make the search case-insensitive. |
|
964 | 990 | |
|
965 | 991 | - show_all(False): show all names, including those starting with |
|
966 | 992 | underscores. |
|
967 | 993 | |
|
968 | 994 | - list_types(False): list all available object types for object matching. |
|
969 | 995 | """ |
|
970 | 996 | #print 'ps pattern:<%r>' % pattern # dbg |
|
971 | 997 | |
|
972 | 998 | # defaults |
|
973 | 999 | type_pattern = 'all' |
|
974 | 1000 | filter = '' |
|
975 | 1001 | |
|
976 | 1002 | # list all object types |
|
977 | 1003 | if list_types: |
|
978 | 1004 | page.page('\n'.join(sorted(typestr2type))) |
|
979 | 1005 | return |
|
980 | 1006 | |
|
981 | 1007 | cmds = pattern.split() |
|
982 | 1008 | len_cmds = len(cmds) |
|
983 | 1009 | if len_cmds == 1: |
|
984 | 1010 | # Only filter pattern given |
|
985 | 1011 | filter = cmds[0] |
|
986 | 1012 | elif len_cmds == 2: |
|
987 | 1013 | # Both filter and type specified |
|
988 | 1014 | filter,type_pattern = cmds |
|
989 | 1015 | else: |
|
990 | 1016 | raise ValueError('invalid argument string for psearch: <%s>' % |
|
991 | 1017 | pattern) |
|
992 | 1018 | |
|
993 | 1019 | # filter search namespaces |
|
994 | 1020 | for name in ns_search: |
|
995 | 1021 | if name not in ns_table: |
|
996 | 1022 | raise ValueError('invalid namespace <%s>. Valid names: %s' % |
|
997 | 1023 | (name,ns_table.keys())) |
|
998 | 1024 | |
|
999 | 1025 | #print 'type_pattern:',type_pattern # dbg |
|
1000 | 1026 | search_result, namespaces_seen = set(), set() |
|
1001 | 1027 | for ns_name in ns_search: |
|
1002 | 1028 | ns = ns_table[ns_name] |
|
1003 | 1029 | # Normally, locals and globals are the same, so we just check one. |
|
1004 | 1030 | if id(ns) in namespaces_seen: |
|
1005 | 1031 | continue |
|
1006 | 1032 | namespaces_seen.add(id(ns)) |
|
1007 | 1033 | tmp_res = list_namespace(ns, type_pattern, filter, |
|
1008 | 1034 | ignore_case=ignore_case, show_all=show_all) |
|
1009 | 1035 | search_result.update(tmp_res) |
|
1010 | 1036 | |
|
1011 | 1037 | page.page('\n'.join(sorted(search_result))) |
|
1012 | 1038 | |
|
1013 | 1039 | |
|
1014 | 1040 | def _render_signature(obj_signature, obj_name) -> str: |
|
1015 | 1041 | """ |
|
1016 | 1042 | This was mostly taken from inspect.Signature.__str__. |
|
1017 | 1043 | Look there for the comments. |
|
1018 | 1044 | The only change is to add linebreaks when this gets too long. |
|
1019 | 1045 | """ |
|
1020 | 1046 | result = [] |
|
1021 | 1047 | pos_only = False |
|
1022 | 1048 | kw_only = True |
|
1023 | 1049 | for param in obj_signature.parameters.values(): |
|
1024 | 1050 | if param.kind == inspect._POSITIONAL_ONLY: |
|
1025 | 1051 | pos_only = True |
|
1026 | 1052 | elif pos_only: |
|
1027 | 1053 | result.append('/') |
|
1028 | 1054 | pos_only = False |
|
1029 | 1055 | |
|
1030 | 1056 | if param.kind == inspect._VAR_POSITIONAL: |
|
1031 | 1057 | kw_only = False |
|
1032 | 1058 | elif param.kind == inspect._KEYWORD_ONLY and kw_only: |
|
1033 | 1059 | result.append('*') |
|
1034 | 1060 | kw_only = False |
|
1035 | 1061 | |
|
1036 | 1062 | result.append(str(param)) |
|
1037 | 1063 | |
|
1038 | 1064 | if pos_only: |
|
1039 | 1065 | result.append('/') |
|
1040 | 1066 | |
|
1041 | 1067 | # add up name, parameters, braces (2), and commas |
|
1042 | 1068 | if len(obj_name) + sum(len(r) + 2 for r in result) > 75: |
|
1043 | 1069 | # This doesn’t fit behind “Signature: ” in an inspect window. |
|
1044 | 1070 | rendered = '{}(\n{})'.format(obj_name, ''.join( |
|
1045 | 1071 | ' {},\n'.format(r) for r in result) |
|
1046 | 1072 | ) |
|
1047 | 1073 | else: |
|
1048 | 1074 | rendered = '{}({})'.format(obj_name, ', '.join(result)) |
|
1049 | 1075 | |
|
1050 | 1076 | if obj_signature.return_annotation is not inspect._empty: |
|
1051 | 1077 | anno = inspect.formatannotation(obj_signature.return_annotation) |
|
1052 | 1078 | rendered += ' -> {}'.format(anno) |
|
1053 | 1079 | |
|
1054 | 1080 | return rendered |
@@ -1,54 +1,54 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | """Release data for the IPython project.""" |
|
3 | 3 | |
|
4 | 4 | #----------------------------------------------------------------------------- |
|
5 | 5 | # Copyright (c) 2008, IPython Development Team. |
|
6 | 6 | # Copyright (c) 2001, Fernando Perez <fernando.perez@colorado.edu> |
|
7 | 7 | # Copyright (c) 2001, Janko Hauser <jhauser@zscout.de> |
|
8 | 8 | # Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu> |
|
9 | 9 | # |
|
10 | 10 | # Distributed under the terms of the Modified BSD License. |
|
11 | 11 | # |
|
12 | 12 | # The full license is in the file COPYING.txt, distributed with this software. |
|
13 | 13 | #----------------------------------------------------------------------------- |
|
14 | 14 | |
|
15 | 15 | # IPython version information. An empty _version_extra corresponds to a full |
|
16 | 16 | # release. 'dev' as a _version_extra string means this is a development |
|
17 | 17 | # version |
|
18 | 18 | _version_major = 8 |
|
19 |
_version_minor = |
|
|
19 | _version_minor = 9 | |
|
20 | 20 | _version_patch = 0 |
|
21 | 21 | _version_extra = ".dev" |
|
22 | 22 | # _version_extra = "rc1" |
|
23 | 23 | # _version_extra = "" # Uncomment this for full releases |
|
24 | 24 | |
|
25 | 25 | # Construct full version string from these. |
|
26 | 26 | _ver = [_version_major, _version_minor, _version_patch] |
|
27 | 27 | |
|
28 | 28 | __version__ = '.'.join(map(str, _ver)) |
|
29 | 29 | if _version_extra: |
|
30 | 30 | __version__ = __version__ + _version_extra |
|
31 | 31 | |
|
32 | 32 | version = __version__ # backwards compatibility name |
|
33 | 33 | version_info = (_version_major, _version_minor, _version_patch, _version_extra) |
|
34 | 34 | |
|
35 | 35 | # Change this when incrementing the kernel protocol version |
|
36 | 36 | kernel_protocol_version_info = (5, 0) |
|
37 | 37 | kernel_protocol_version = "%i.%i" % kernel_protocol_version_info |
|
38 | 38 | |
|
39 | 39 | license = "BSD-3-Clause" |
|
40 | 40 | |
|
41 | 41 | authors = {'Fernando' : ('Fernando Perez','fperez.net@gmail.com'), |
|
42 | 42 | 'Janko' : ('Janko Hauser','jhauser@zscout.de'), |
|
43 | 43 | 'Nathan' : ('Nathaniel Gray','n8gray@caltech.edu'), |
|
44 | 44 | 'Ville' : ('Ville Vainio','vivainio@gmail.com'), |
|
45 | 45 | 'Brian' : ('Brian E Granger', 'ellisonbg@gmail.com'), |
|
46 | 46 | 'Min' : ('Min Ragan-Kelley', 'benjaminrk@gmail.com'), |
|
47 | 47 | 'Thomas' : ('Thomas A. Kluyver', 'takowl@gmail.com'), |
|
48 | 48 | 'Jorgen' : ('Jorgen Stenarson', 'jorgen.stenarson@bostream.nu'), |
|
49 | 49 | 'Matthias' : ('Matthias Bussonnier', 'bussonniermatthias@gmail.com'), |
|
50 | 50 | } |
|
51 | 51 | |
|
52 | 52 | author = 'The IPython Development Team' |
|
53 | 53 | |
|
54 | 54 | author_email = 'ipython-dev@python.org' |
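Worked out from the fields above (no new information): with these values the derived version strings are assembled as follows.

    _ver = [8, 9, 0]
    __version__ = ".".join(map(str, _ver)) + ".dev"   # "8.9.0.dev"
    version_info = (8, 9, 0, ".dev")                  # (major, minor, patch, extra)
    kernel_protocol_version = "%i.%i" % (5, 0)        # "5.0"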
@@ -1,1505 +1,1702 b'' | |||
|
1 | 1 | # encoding: utf-8 |
|
2 | 2 | """Tests for the IPython tab-completion machinery.""" |
|
3 | 3 | |
|
4 | 4 | # Copyright (c) IPython Development Team. |
|
5 | 5 | # Distributed under the terms of the Modified BSD License. |
|
6 | 6 | |
|
7 | 7 | import os |
|
8 | 8 | import pytest |
|
9 | 9 | import sys |
|
10 | 10 | import textwrap |
|
11 | 11 | import unittest |
|
12 | 12 | |
|
13 | 13 | from contextlib import contextmanager |
|
14 | 14 | |
|
15 | 15 | from traitlets.config.loader import Config |
|
16 | 16 | from IPython import get_ipython |
|
17 | 17 | from IPython.core import completer |
|
18 | 18 | from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory |
|
19 | 19 | from IPython.utils.generics import complete_object |
|
20 | 20 | from IPython.testing import decorators as dec |
|
21 | 21 | |
|
22 | 22 | from IPython.core.completer import ( |
|
23 | 23 | Completion, |
|
24 | 24 | provisionalcompleter, |
|
25 | 25 | match_dict_keys, |
|
26 | 26 | _deduplicate_completions, |
|
27 | _match_number_in_dict_key_prefix, | |
|
27 | 28 | completion_matcher, |
|
28 | 29 | SimpleCompletion, |
|
29 | 30 | CompletionContext, |
|
30 | 31 | ) |
|
31 | 32 | |
|
32 | 33 | # ----------------------------------------------------------------------------- |
|
33 | 34 | # Test functions |
|
34 | 35 | # ----------------------------------------------------------------------------- |
|
35 | 36 | |
|
36 | 37 | def recompute_unicode_ranges(): |
|
37 | 38 | """ |
|
38 | 39 | utility to recompute the largest unicode range without any characters |
|
39 | 40 | |
|
40 | 41 | use to recompute the gap in the global _UNICODE_RANGES of completer.py |
|
41 | 42 | """ |
|
42 | 43 | import itertools |
|
43 | 44 | import unicodedata |
|
44 | 45 | valid = [] |
|
45 | 46 | for c in range(0,0x10FFFF + 1): |
|
46 | 47 | try: |
|
47 | 48 | unicodedata.name(chr(c)) |
|
48 | 49 | except ValueError: |
|
49 | 50 | continue |
|
50 | 51 | valid.append(c) |
|
51 | 52 | |
|
52 | 53 | def ranges(i): |
|
53 | 54 | for a, b in itertools.groupby(enumerate(i), lambda pair: pair[1] - pair[0]): |
|
54 | 55 | b = list(b) |
|
55 | 56 | yield b[0][1], b[-1][1] |
|
56 | 57 | |
|
57 | 58 | rg = list(ranges(valid)) |
|
58 | 59 | lens = [] |
|
59 | 60 | gap_lens = [] |
|
60 | 61 | pstart, pstop = 0,0 |
|
61 | 62 | for start, stop in rg: |
|
62 | 63 | lens.append(stop-start) |
|
63 | 64 | gap_lens.append((start - pstop, hex(pstop), hex(start), f'{round((start - pstop)/0xe01f0*100)}%')) |
|
64 | 65 | pstart, pstop = start, stop |
|
65 | 66 | |
|
66 | 67 | return sorted(gap_lens)[-1] |
|
67 | 68 | |
|
68 | 69 | |
|
69 | 70 | |
|
70 | 71 | def test_unicode_range(): |
|
71 | 72 | """ |
|
72 | 73 | Test that the ranges we test for unicode names give the same number of |
|
73 | 74 | results than testing the full length. |
|
74 | 75 | """ |
|
75 | 76 | from IPython.core.completer import _unicode_name_compute, _UNICODE_RANGES |
|
76 | 77 | |
|
77 | 78 | expected_list = _unicode_name_compute([(0, 0x110000)]) |
|
78 | 79 | test = _unicode_name_compute(_UNICODE_RANGES) |
|
79 | 80 | len_exp = len(expected_list) |
|
80 | 81 | len_test = len(test) |
|
81 | 82 | |
|
82 | 83 | # do not inline the len() or on error pytest will try to print the 130 000 + |
|
83 | 84 | # elements. |
|
84 | 85 | message = None |
|
85 | 86 | if len_exp != len_test or len_exp > 131808: |
|
86 | 87 | size, start, stop, prct = recompute_unicode_ranges() |
|
87 | 88 | message = f"""_UNICODE_RANGES likely wrong and need updating. This is |
|
88 | 89 | likely due to a new release of Python. We've find that the biggest gap |
|
89 | 90 | in unicode characters has reduces in size to be {size} characters |
|
90 | 91 | ({prct}), from {start}, to {stop}. In completer.py likely update to |
|
91 | 92 | |
|
92 | 93 | _UNICODE_RANGES = [(32, {start}), ({stop}, 0xe01f0)] |
|
93 | 94 | |
|
94 | 95 | And update the assertion below to use |
|
95 | 96 | |
|
96 | 97 | len_exp <= {len_exp} |
|
97 | 98 | """ |
|
98 | 99 | assert len_exp == len_test, message |
|
99 | 100 | |
|
100 | 101 | # fail if new unicode symbols have been added. |
|
101 |
assert len_exp <= 1 |
|
|
102 | assert len_exp <= 143041, message | |
|
102 | 103 | |
|
103 | 104 | |
|
104 | 105 | @contextmanager |
|
105 | 106 | def greedy_completion(): |
|
106 | 107 | ip = get_ipython() |
|
107 | 108 | greedy_original = ip.Completer.greedy |
|
108 | 109 | try: |
|
109 | 110 | ip.Completer.greedy = True |
|
110 | 111 | yield |
|
111 | 112 | finally: |
|
112 | 113 | ip.Completer.greedy = greedy_original |
|
113 | 114 | |
|
114 | 115 | |
|
115 | 116 | @contextmanager |
|
117 | def evaluation_policy(evaluation: str): | |
|
118 | ip = get_ipython() | |
|
119 | evaluation_original = ip.Completer.evaluation | |
|
120 | try: | |
|
121 | ip.Completer.evaluation = evaluation | |
|
122 | yield | |
|
123 | finally: | |
|
124 | ip.Completer.evaluation = evaluation_original | |
|
125 | ||
|
126 | ||
|
127 | @contextmanager | |
|
116 | 128 | def custom_matchers(matchers): |
|
117 | 129 | ip = get_ipython() |
|
118 | 130 | try: |
|
119 | 131 | ip.Completer.custom_matchers.extend(matchers) |
|
120 | 132 | yield |
|
121 | 133 | finally: |
|
122 | 134 | ip.Completer.custom_matchers.clear() |
|
123 | 135 | |
|
124 | 136 | |
|
125 | 137 | def test_protect_filename(): |
|
126 | 138 | if sys.platform == "win32": |
|
127 | 139 | pairs = [ |
|
128 | 140 | ("abc", "abc"), |
|
129 | 141 | (" abc", '" abc"'), |
|
130 | 142 | ("a bc", '"a bc"'), |
|
131 | 143 | ("a bc", '"a bc"'), |
|
132 | 144 | (" bc", '" bc"'), |
|
133 | 145 | ] |
|
134 | 146 | else: |
|
135 | 147 | pairs = [ |
|
136 | 148 | ("abc", "abc"), |
|
137 | 149 | (" abc", r"\ abc"), |
|
138 | 150 | ("a bc", r"a\ bc"), |
|
139 | 151 | ("a bc", r"a\ \ bc"), |
|
140 | 152 | (" bc", r"\ \ bc"), |
|
141 | 153 | # On posix, we also protect parens and other special characters. |
|
142 | 154 | ("a(bc", r"a\(bc"), |
|
143 | 155 | ("a)bc", r"a\)bc"), |
|
144 | 156 | ("a( )bc", r"a\(\ \)bc"), |
|
145 | 157 | ("a[1]bc", r"a\[1\]bc"), |
|
146 | 158 | ("a{1}bc", r"a\{1\}bc"), |
|
147 | 159 | ("a#bc", r"a\#bc"), |
|
148 | 160 | ("a?bc", r"a\?bc"), |
|
149 | 161 | ("a=bc", r"a\=bc"), |
|
150 | 162 | ("a\\bc", r"a\\bc"), |
|
151 | 163 | ("a|bc", r"a\|bc"), |
|
152 | 164 | ("a;bc", r"a\;bc"), |
|
153 | 165 | ("a:bc", r"a\:bc"), |
|
154 | 166 | ("a'bc", r"a\'bc"), |
|
155 | 167 | ("a*bc", r"a\*bc"), |
|
156 | 168 | ('a"bc', r"a\"bc"), |
|
157 | 169 | ("a^bc", r"a\^bc"), |
|
158 | 170 | ("a&bc", r"a\&bc"), |
|
159 | 171 | ] |
|
160 | 172 | # run the actual tests |
|
161 | 173 | for s1, s2 in pairs: |
|
162 | 174 | s1p = completer.protect_filename(s1) |
|
163 | 175 | assert s1p == s2 |
|
164 | 176 | |
|
165 | 177 | |
|
166 | 178 | def check_line_split(splitter, test_specs): |
|
167 | 179 | for part1, part2, split in test_specs: |
|
168 | 180 | cursor_pos = len(part1) |
|
169 | 181 | line = part1 + part2 |
|
170 | 182 | out = splitter.split_line(line, cursor_pos) |
|
171 | 183 | assert out == split |
|
172 | 184 | |
|
173 | ||
|
174 | 185 | def test_line_split(): |
|
175 | 186 | """Basic line splitter test with default specs.""" |
|
176 | 187 | sp = completer.CompletionSplitter() |
|
177 | 188 | # The format of the test specs is: part1, part2, expected answer. Parts 1 |
|
178 | 189 | # and 2 are joined into the 'line' sent to the splitter, as if the cursor |
|
179 | 190 | # was at the end of part1. So an empty part2 represents someone hitting |
|
180 | 191 | # tab at the end of the line, the most common case. |
|
181 | 192 | t = [ |
|
182 | 193 | ("run some/scrip", "", "some/scrip"), |
|
183 | 194 | ("run scripts/er", "ror.py foo", "scripts/er"), |
|
184 | 195 | ("echo $HOM", "", "HOM"), |
|
185 | 196 | ("print sys.pa", "", "sys.pa"), |
|
186 | 197 | ("print(sys.pa", "", "sys.pa"), |
|
187 | 198 | ("execfile('scripts/er", "", "scripts/er"), |
|
188 | 199 | ("a[x.", "", "x."), |
|
189 | 200 | ("a[x.", "y", "x."), |
|
190 | 201 | ('cd "some_file/', "", "some_file/"), |
|
191 | 202 | ] |
|
192 | 203 | check_line_split(sp, t) |
|
193 | 204 | # Ensure splitting works OK with unicode by re-running the tests with |
|
194 | 205 | # all inputs turned into unicode |
|
195 | 206 | check_line_split(sp, [map(str, p) for p in t]) |
|
196 | 207 | |
|
197 | 208 | |
|
198 | 209 | class NamedInstanceClass: |
|
199 | 210 | instances = {} |
|
200 | 211 | |
|
201 | 212 | def __init__(self, name): |
|
202 | 213 | self.instances[name] = self |
|
203 | 214 | |
|
204 | 215 | @classmethod |
|
205 | 216 | def _ipython_key_completions_(cls): |
|
206 | 217 | return cls.instances.keys() |
|
207 | 218 | |
|
208 | 219 | |
|
209 | 220 | class KeyCompletable: |
|
210 | 221 | def __init__(self, things=()): |
|
211 | 222 | self.things = things |
|
212 | 223 | |
|
213 | 224 | def _ipython_key_completions_(self): |
|
214 | 225 | return list(self.things) |
|
215 | 226 | |
|
216 | 227 | |
|
217 | 228 | class TestCompleter(unittest.TestCase): |
|
218 | 229 | def setUp(self): |
|
219 | 230 | """ |
|
220 | 231 | We want to silence all PendingDeprecationWarning when testing the completer |
|
221 | 232 | """ |
|
222 | 233 | self._assertwarns = self.assertWarns(PendingDeprecationWarning) |
|
223 | 234 | self._assertwarns.__enter__() |
|
224 | 235 | |
|
225 | 236 | def tearDown(self): |
|
226 | 237 | try: |
|
227 | 238 | self._assertwarns.__exit__(None, None, None) |
|
228 | 239 | except AssertionError: |
|
229 | 240 | pass |
|
230 | 241 | |
|
231 | 242 | def test_custom_completion_error(self): |
|
232 | 243 | """Test that errors from custom attribute completers are silenced.""" |
|
233 | 244 | ip = get_ipython() |
|
234 | 245 | |
|
235 | 246 | class A: |
|
236 | 247 | pass |
|
237 | 248 | |
|
238 | 249 | ip.user_ns["x"] = A() |
|
239 | 250 | |
|
240 | 251 | @complete_object.register(A) |
|
241 | 252 | def complete_A(a, existing_completions): |
|
242 | 253 | raise TypeError("this should be silenced") |
|
243 | 254 | |
|
244 | 255 | ip.complete("x.") |
|
245 | 256 | |
|
246 | 257 | def test_custom_completion_ordering(self): |
|
247 | 258 | """Test that errors from custom attribute completers are silenced.""" |
|
248 | 259 | ip = get_ipython() |
|
249 | 260 | |
|
250 | 261 | _, matches = ip.complete('in') |
|
251 | 262 | assert matches.index('input') < matches.index('int') |
|
252 | 263 | |
|
253 | 264 | def complete_example(a): |
|
254 | 265 | return ['example2', 'example1'] |
|
255 | 266 | |
|
256 | 267 | ip.Completer.custom_completers.add_re('ex*', complete_example) |
|
257 | 268 | _, matches = ip.complete('ex') |
|
258 | 269 | assert matches.index('example2') < matches.index('example1') |
|
259 | 270 | |
|
260 | 271 | def test_unicode_completions(self): |
|
261 | 272 | ip = get_ipython() |
|
262 | 273 | # Some strings that trigger different types of completion. Check them both |
|
263 | 274 | # in str and unicode forms |
|
264 | 275 | s = ["ru", "%ru", "cd /", "floa", "float(x)/"] |
|
265 | 276 | for t in s + list(map(str, s)): |
|
266 | 277 | # We don't need to check exact completion values (they may change |
|
267 | 278 | # depending on the state of the namespace, but at least no exceptions |
|
268 | 279 | # should be thrown and the return value should be a pair of text, list |
|
269 | 280 | # values. |
|
270 | 281 | text, matches = ip.complete(t) |
|
271 | 282 | self.assertIsInstance(text, str) |
|
272 | 283 | self.assertIsInstance(matches, list) |
|
273 | 284 | |
|
274 | 285 | def test_latex_completions(self): |
|
275 | 286 | from IPython.core.latex_symbols import latex_symbols |
|
276 | 287 | import random |
|
277 | 288 | |
|
278 | 289 | ip = get_ipython() |
|
279 | 290 | # Test some random unicode symbols |
|
280 | 291 | keys = random.sample(sorted(latex_symbols), 10) |
|
281 | 292 | for k in keys: |
|
282 | 293 | text, matches = ip.complete(k) |
|
283 | 294 | self.assertEqual(text, k) |
|
284 | 295 | self.assertEqual(matches, [latex_symbols[k]]) |
|
285 | 296 | # Test a more complex line |
|
286 | 297 | text, matches = ip.complete("print(\\alpha") |
|
287 | 298 | self.assertEqual(text, "\\alpha") |
|
288 | 299 | self.assertEqual(matches[0], latex_symbols["\\alpha"]) |
|
289 | 300 | # Test multiple matching latex symbols |
|
290 | 301 | text, matches = ip.complete("\\al") |
|
291 | 302 | self.assertIn("\\alpha", matches) |
|
292 | 303 | self.assertIn("\\aleph", matches) |
|
293 | 304 | |
|
294 | 305 | def test_latex_no_results(self): |
|
295 | 306 | """ |
|
296 | 307 | forward latex should really return nothing in either field if nothing is found. |
|
297 | 308 | """ |
|
298 | 309 | ip = get_ipython() |
|
299 | 310 | text, matches = ip.Completer.latex_matches("\\really_i_should_match_nothing") |
|
300 | 311 | self.assertEqual(text, "") |
|
301 | 312 | self.assertEqual(matches, ()) |
|
302 | 313 | |
|
303 | 314 | def test_back_latex_completion(self): |
|
304 | 315 | ip = get_ipython() |
|
305 | 316 | |
|
306 | 317 | # do not return more than 1 matches for \beta, only the latex one. |
|
307 | 318 | name, matches = ip.complete("\\β") |
|
308 | 319 | self.assertEqual(matches, ["\\beta"]) |
|
309 | 320 | |
|
310 | 321 | def test_back_unicode_completion(self): |
|
311 | 322 | ip = get_ipython() |
|
312 | 323 | |
|
313 | 324 | name, matches = ip.complete("\\Ⅴ") |
|
314 | 325 | self.assertEqual(matches, ["\\ROMAN NUMERAL FIVE"]) |
|
315 | 326 | |
|
316 | 327 | def test_forward_unicode_completion(self): |
|
317 | 328 | ip = get_ipython() |
|
318 | 329 | |
|
319 | 330 | name, matches = ip.complete("\\ROMAN NUMERAL FIVE") |
|
320 | 331 | self.assertEqual(matches, ["Ⅴ"]) # This is not a V |
|
321 | 332 | self.assertEqual(matches, ["\u2164"]) # same as above but explicit. |
|
322 | 333 | |
|
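The LaTeX tests above rely on a symbol table that maps names such as `\alpha` to their Unicode characters, and back again. A hedged sketch of the round trip they check:

    ip = get_ipython()

    # Forward: completing a LaTeX name offers the Unicode character itself.
    text, matches = ip.complete("\\alpha")
    # text == "\\alpha" and matches == ["α"] when the symbol table knows "\alpha"

    # Backward: an already-typed character can be completed back to its LaTeX name.
    text, matches = ip.complete("\\β")
    # matches == ["\\beta"]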
323 | 334 | def test_delim_setting(self): |
|
324 | 335 | sp = completer.CompletionSplitter() |
|
325 | 336 | sp.delims = " " |
|
326 | 337 | self.assertEqual(sp.delims, " ") |
|
327 | 338 | self.assertEqual(sp._delim_expr, r"[\ ]") |
|
328 | 339 | |
|
329 | 340 | def test_spaces(self): |
|
330 | 341 | """Test with only spaces as split chars.""" |
|
331 | 342 | sp = completer.CompletionSplitter() |
|
332 | 343 | sp.delims = " " |
|
333 | 344 | t = [("foo", "", "foo"), ("run foo", "", "foo"), ("run foo", "bar", "foo")] |
|
334 | 345 | check_line_split(sp, t) |
|
335 | 346 | |
|
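`CompletionSplitter` is the piece under test here: it extracts the fragment that should be completed from the end of a line, splitting on a configurable set of delimiter characters. A small sketch:

    from IPython.core import completer

    sp = completer.CompletionSplitter()
    sp.delims = " "                     # treat only spaces as split characters
    # The splitter hands back the trailing fragment of the line:
    assert sp.split_line("run foo") == "foo"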
336 | 347 | def test_has_open_quotes1(self): |
|
337 | 348 | for s in ["'", "'''", "'hi' '"]: |
|
338 | 349 | self.assertEqual(completer.has_open_quotes(s), "'") |
|
339 | 350 | |
|
340 | 351 | def test_has_open_quotes2(self): |
|
341 | 352 | for s in ['"', '"""', '"hi" "']: |
|
342 | 353 | self.assertEqual(completer.has_open_quotes(s), '"') |
|
343 | 354 | |
|
344 | 355 | def test_has_open_quotes3(self): |
|
345 | 356 | for s in ["''", "''' '''", "'hi' 'ipython'"]: |
|
346 | 357 | self.assertFalse(completer.has_open_quotes(s)) |
|
347 | 358 | |
|
348 | 359 | def test_has_open_quotes4(self): |
|
349 | 360 | for s in ['""', '""" """', '"hi" "ipython"']: |
|
350 | 361 | self.assertFalse(completer.has_open_quotes(s)) |
|
351 | 362 | |
|
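`has_open_quotes()` reports which quote character, if any, is still open at the end of a string; the completer uses this to decide how to quote file names and dict keys. A short sketch of the contract checked above:

    from IPython.core.completer import has_open_quotes

    assert has_open_quotes("print('hi") == "'"     # single quote still open
    assert has_open_quotes('print("hi') == '"'     # double quote still open
    assert not has_open_quotes("print('hi')")      # falsy when everything is closed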
352 | 363 | @pytest.mark.xfail( |
|
353 | 364 | sys.platform == "win32", reason="abspath completions fail on Windows" |
|
354 | 365 | ) |
|
355 | 366 | def test_abspath_file_completions(self): |
|
356 | 367 | ip = get_ipython() |
|
357 | 368 | with TemporaryDirectory() as tmpdir: |
|
358 | 369 | prefix = os.path.join(tmpdir, "foo") |
|
359 | 370 | suffixes = ["1", "2"] |
|
360 | 371 | names = [prefix + s for s in suffixes] |
|
361 | 372 | for n in names: |
|
362 | 373 | open(n, "w", encoding="utf-8").close() |
|
363 | 374 | |
|
364 | 375 | # Check simple completion |
|
365 | 376 | c = ip.complete(prefix)[1] |
|
366 | 377 | self.assertEqual(c, names) |
|
367 | 378 | |
|
368 | 379 | # Now check with a function call |
|
369 | 380 | cmd = 'a = f("%s' % prefix |
|
370 | 381 | c = ip.complete(prefix, cmd)[1] |
|
371 | 382 | comp = [prefix + s for s in suffixes] |
|
372 | 383 | self.assertEqual(c, comp) |
|
373 | 384 | |
|
374 | 385 | def test_local_file_completions(self): |
|
375 | 386 | ip = get_ipython() |
|
376 | 387 | with TemporaryWorkingDirectory(): |
|
377 | 388 | prefix = "./foo" |
|
378 | 389 | suffixes = ["1", "2"] |
|
379 | 390 | names = [prefix + s for s in suffixes] |
|
380 | 391 | for n in names: |
|
381 | 392 | open(n, "w", encoding="utf-8").close() |
|
382 | 393 | |
|
383 | 394 | # Check simple completion |
|
384 | 395 | c = ip.complete(prefix)[1] |
|
385 | 396 | self.assertEqual(c, names) |
|
386 | 397 | |
|
387 | 398 | # Now check with a function call |
|
388 | 399 | cmd = 'a = f("%s' % prefix |
|
389 | 400 | c = ip.complete(prefix, cmd)[1] |
|
390 | 401 | comp = {prefix + s for s in suffixes} |
|
391 | 402 | self.assertTrue(comp.issubset(set(c))) |
|
392 | 403 | |
|
393 | 404 | def test_quoted_file_completions(self): |
|
394 | 405 | ip = get_ipython() |
|
395 | 406 | |
|
396 | 407 | def _(text): |
|
397 | 408 | return ip.Completer._complete( |
|
398 | 409 | cursor_line=0, cursor_pos=len(text), full_text=text |
|
399 | 410 | )["IPCompleter.file_matcher"]["completions"] |
|
400 | 411 | |
|
401 | 412 | with TemporaryWorkingDirectory(): |
|
402 | 413 | name = "foo'bar" |
|
403 | 414 | open(name, "w", encoding="utf-8").close() |
|
404 | 415 | |
|
405 | 416 | # Don't escape Windows |
|
406 | 417 | escaped = name if sys.platform == "win32" else "foo\\'bar" |
|
407 | 418 | |
|
408 | 419 | # Single quote matches embedded single quote |
|
409 | 420 | c = _("open('foo")[0] |
|
410 | 421 | self.assertEqual(c.text, escaped) |
|
411 | 422 | |
|
412 | 423 | # Double quote requires no escape |
|
413 | 424 | c = _('open("foo')[0] |
|
414 | 425 | self.assertEqual(c.text, name) |
|
415 | 426 | |
|
416 | 427 | # No quote requires an escape |
|
417 | 428 | c = _("%ls foo")[0] |
|
418 | 429 | self.assertEqual(c.text, escaped) |
|
419 | 430 | |
|
420 | 431 | def test_all_completions_dups(self): |
|
421 | 432 | """ |
|
422 | 433 | Make sure the output of `IPCompleter.all_completions` does not have |
|
423 | 434 | duplicated prefixes. |
|
424 | 435 | """ |
|
425 | 436 | ip = get_ipython() |
|
426 | 437 | c = ip.Completer |
|
427 | 438 | ip.ex("class TestClass():\n\ta=1\n\ta1=2") |
|
428 | 439 | for jedi_status in [True, False]: |
|
429 | 440 | with provisionalcompleter(): |
|
430 | 441 | ip.Completer.use_jedi = jedi_status |
|
431 | 442 | matches = c.all_completions("TestCl") |
|
432 | 443 | assert matches == ["TestClass"], (jedi_status, matches) |
|
433 | 444 | matches = c.all_completions("TestClass.") |
|
434 | 445 | assert len(matches) > 2, (jedi_status, matches) |
|
435 | 446 | matches = c.all_completions("TestClass.a") |
|
436 | 447 | assert matches == ['TestClass.a', 'TestClass.a1'], jedi_status |
|
437 | 448 | |
|
438 | 449 | def test_jedi(self): |
|
439 | 450 | """ |
|
440 | 451 | A couple of issue we had with Jedi |
|
441 | 452 | """ |
|
442 | 453 | ip = get_ipython() |
|
443 | 454 | |
|
444 | 455 | def _test_complete(reason, s, comp, start=None, end=None): |
|
445 | 456 | l = len(s) |
|
446 | 457 | start = start if start is not None else l |
|
447 | 458 | end = end if end is not None else l |
|
448 | 459 | with provisionalcompleter(): |
|
449 | 460 | ip.Completer.use_jedi = True |
|
450 | 461 | completions = set(ip.Completer.completions(s, l)) |
|
451 | 462 | ip.Completer.use_jedi = False |
|
452 | 463 | assert Completion(start, end, comp) in completions, reason |
|
453 | 464 | |
|
454 | 465 | def _test_not_complete(reason, s, comp): |
|
455 | 466 | l = len(s) |
|
456 | 467 | with provisionalcompleter(): |
|
457 | 468 | ip.Completer.use_jedi = True |
|
458 | 469 | completions = set(ip.Completer.completions(s, l)) |
|
459 | 470 | ip.Completer.use_jedi = False |
|
460 | 471 | assert Completion(l, l, comp) not in completions, reason |
|
461 | 472 | |
|
462 | 473 | import jedi |
|
463 | 474 | |
|
464 | 475 | jedi_version = tuple(int(i) for i in jedi.__version__.split(".")[:3]) |
|
465 | 476 | if jedi_version > (0, 10): |
|
466 | 477 | _test_complete("jedi >0.9 should complete and not crash", "a=1;a.", "real") |
|
467 | 478 | _test_complete("can infer first argument", 'a=(1,"foo");a[0].', "real") |
|
468 | 479 | _test_complete("can infer second argument", 'a=(1,"foo");a[1].', "capitalize") |
|
469 | 480 | _test_complete("cover duplicate completions", "im", "import", 0, 2) |
|
470 | 481 | |
|
471 | 482 | _test_not_complete("does not mix types", 'a=(1,"foo");a[0].', "capitalize") |
|
472 | 483 | |
|
473 | 484 | def test_completion_have_signature(self): |
|
474 | 485 | """ |
|
475 | 486 | Lets make sure jedi is capable of pulling out the signature of the function we are completing. |
|
476 | 487 | """ |
|
477 | 488 | ip = get_ipython() |
|
478 | 489 | with provisionalcompleter(): |
|
479 | 490 | ip.Completer.use_jedi = True |
|
480 | 491 | completions = ip.Completer.completions("ope", 3) |
|
481 | 492 | c = next(completions) # should be `open` |
|
482 | 493 | ip.Completer.use_jedi = False |
|
483 | 494 | assert "file" in c.signature, "Signature of function was not found by completer" |
|
484 | 495 | assert ( |
|
485 | 496 | "encoding" in c.signature |
|
486 | 497 | ), "Signature of function was not found by completer" |
|
487 | 498 | |
|
488 | 499 | def test_completions_have_type(self): |
|
489 | 500 | """ |
|
490 | 501 | Lets make sure matchers provide completion type. |
|
491 | 502 | """ |
|
492 | 503 | ip = get_ipython() |
|
493 | 504 | with provisionalcompleter(): |
|
494 | 505 | ip.Completer.use_jedi = False |
|
495 | 506 | completions = ip.Completer.completions("%tim", 3) |
|
496 | 507 | c = next(completions) # should be `%time` or similar |
|
497 | 508 | assert c.type == "magic", "Type of magic was not assigned by completer" |
|
498 | 509 | |
|
499 | 510 | @pytest.mark.xfail(reason="Known failure on jedi<=0.18.0") |
|
500 | 511 | def test_deduplicate_completions(self): |
|
501 | 512 | """ |
|
502 | 513 | Test that completions are correctly deduplicated (even if ranges are not the same) |
|
503 | 514 | """ |
|
504 | 515 | ip = get_ipython() |
|
505 | 516 | ip.ex( |
|
506 | 517 | textwrap.dedent( |
|
507 | 518 | """ |
|
508 | 519 | class Z: |
|
509 | 520 | zoo = 1 |
|
510 | 521 | """ |
|
511 | 522 | ) |
|
512 | 523 | ) |
|
513 | 524 | with provisionalcompleter(): |
|
514 | 525 | ip.Completer.use_jedi = True |
|
515 | 526 | l = list( |
|
516 | 527 | _deduplicate_completions("Z.z", ip.Completer.completions("Z.z", 3)) |
|
517 | 528 | ) |
|
518 | 529 | ip.Completer.use_jedi = False |
|
519 | 530 | |
|
520 | 531 | assert len(l) == 1, "Completions (Z.z<tab>) correctly deduplicate: %s " % l |
|
521 | 532 | assert l[0].text == "zoo" # and not `it.accumulate` |
|
522 | 533 | |
|
523 | 534 | def test_greedy_completions(self): |
|
524 | 535 | """ |
|
525 | 536 | Test the capability of the Greedy completer. |
|
526 | 537 | |
|
527 | 538 | Most of the test here does not really show off the greedy completer, for proof |
|
528 | 539 | each of the text below now pass with Jedi. The greedy completer is capable of more. |
|
529 | 540 | |
|
530 | 541 | See the :any:`test_dict_key_completion_contexts` |
|
531 | 542 | |
|
532 | 543 | """ |
|
533 | 544 | ip = get_ipython() |
|
534 | 545 | ip.ex("a=list(range(5))") |
|
535 | 546 | _, c = ip.complete(".", line="a[0].") |
|
536 | 547 | self.assertFalse(".real" in c, "Shouldn't have completed on a[0]: %s" % c) |
|
537 | 548 | |
|
538 | 549 | def _(line, cursor_pos, expect, message, completion): |
|
539 | 550 | with greedy_completion(), provisionalcompleter(): |
|
540 | 551 | ip.Completer.use_jedi = False |
|
541 | 552 | _, c = ip.complete(".", line=line, cursor_pos=cursor_pos) |
|
542 | 553 | self.assertIn(expect, c, message % c) |
|
543 | 554 | |
|
544 | 555 | ip.Completer.use_jedi = True |
|
545 | 556 | with provisionalcompleter(): |
|
546 | 557 | completions = ip.Completer.completions(line, cursor_pos) |
|
547 | 558 | self.assertIn(completion, completions) |
|
548 | 559 | |
|
549 | 560 | with provisionalcompleter(): |
|
550 | 561 | _( |
|
551 | 562 | "a[0].", |
|
552 | 563 | 5, |
|
553 | 564 | "a[0].real", |
|
554 | 565 | "Should have completed on a[0].: %s", |
|
555 | 566 | Completion(5, 5, "real"), |
|
556 | 567 | ) |
|
557 | 568 | _( |
|
558 | 569 | "a[0].r", |
|
559 | 570 | 6, |
|
560 | 571 | "a[0].real", |
|
561 | 572 | "Should have completed on a[0].r: %s", |
|
562 | 573 | Completion(5, 6, "real"), |
|
563 | 574 | ) |
|
564 | 575 | |
|
565 | 576 | _( |
|
566 | 577 | "a[0].from_", |
|
567 | 578 | 10, |
|
568 | 579 | "a[0].from_bytes", |
|
569 | 580 | "Should have completed on a[0].from_: %s", |
|
570 | 581 | Completion(5, 10, "from_bytes"), |
|
571 | 582 | ) |
|
572 | 583 | |
|
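The Jedi and greedy tests above use the provisional `Completer.completions()` generator rather than the legacy `complete()` call; it yields `Completion` objects carrying the replacement range. A hedged sketch of that pattern:

    from IPython.core.completer import provisionalcompleter

    ip = get_ipython()
    ip.ex("a = list(range(5))")

    code = "a[0]."
    with provisionalcompleter():            # opt in to the provisional API
        ip.Completer.use_jedi = True
        completions = list(ip.Completer.completions(code, len(code)))
        ip.Completer.use_jedi = False
    # Each item behaves like Completion(start=5, end=5, text="real"), i.e. it says
    # which slice of the input the completion text should replace.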
573 | 584 | def test_omit__names(self): |
|
574 | 585 | # also happens to test IPCompleter as a configurable |
|
575 | 586 | ip = get_ipython() |
|
576 | 587 | ip._hidden_attr = 1 |
|
577 | 588 | ip._x = {} |
|
578 | 589 | c = ip.Completer |
|
579 | 590 | ip.ex("ip=get_ipython()") |
|
580 | 591 | cfg = Config() |
|
581 | 592 | cfg.IPCompleter.omit__names = 0 |
|
582 | 593 | c.update_config(cfg) |
|
583 | 594 | with provisionalcompleter(): |
|
584 | 595 | c.use_jedi = False |
|
585 | 596 | s, matches = c.complete("ip.") |
|
586 | 597 | self.assertIn("ip.__str__", matches) |
|
587 | 598 | self.assertIn("ip._hidden_attr", matches) |
|
588 | 599 | |
|
589 | 600 | # c.use_jedi = True |
|
590 | 601 | # completions = set(c.completions('ip.', 3)) |
|
591 | 602 | # self.assertIn(Completion(3, 3, '__str__'), completions) |
|
592 | 603 | # self.assertIn(Completion(3,3, "_hidden_attr"), completions) |
|
593 | 604 | |
|
594 | 605 | cfg = Config() |
|
595 | 606 | cfg.IPCompleter.omit__names = 1 |
|
596 | 607 | c.update_config(cfg) |
|
597 | 608 | with provisionalcompleter(): |
|
598 | 609 | c.use_jedi = False |
|
599 | 610 | s, matches = c.complete("ip.") |
|
600 | 611 | self.assertNotIn("ip.__str__", matches) |
|
601 | 612 | # self.assertIn('ip._hidden_attr', matches) |
|
602 | 613 | |
|
603 | 614 | # c.use_jedi = True |
|
604 | 615 | # completions = set(c.completions('ip.', 3)) |
|
605 | 616 | # self.assertNotIn(Completion(3,3,'__str__'), completions) |
|
606 | 617 | # self.assertIn(Completion(3,3, "_hidden_attr"), completions) |
|
607 | 618 | |
|
608 | 619 | cfg = Config() |
|
609 | 620 | cfg.IPCompleter.omit__names = 2 |
|
610 | 621 | c.update_config(cfg) |
|
611 | 622 | with provisionalcompleter(): |
|
612 | 623 | c.use_jedi = False |
|
613 | 624 | s, matches = c.complete("ip.") |
|
614 | 625 | self.assertNotIn("ip.__str__", matches) |
|
615 | 626 | self.assertNotIn("ip._hidden_attr", matches) |
|
616 | 627 | |
|
617 | 628 | # c.use_jedi = True |
|
618 | 629 | # completions = set(c.completions('ip.', 3)) |
|
619 | 630 | # self.assertNotIn(Completion(3,3,'__str__'), completions) |
|
620 | 631 | # self.assertNotIn(Completion(3,3, "_hidden_attr"), completions) |
|
621 | 632 | |
|
622 | 633 | with provisionalcompleter(): |
|
623 | 634 | c.use_jedi = False |
|
624 | 635 | s, matches = c.complete("ip._x.") |
|
625 | 636 | self.assertIn("ip._x.keys", matches) |
|
626 | 637 | |
|
627 | 638 | # c.use_jedi = True |
|
628 | 639 | # completions = set(c.completions('ip._x.', 6)) |
|
629 | 640 | # self.assertIn(Completion(6,6, "keys"), completions) |
|
630 | 641 | |
|
631 | 642 | del ip._hidden_attr |
|
632 | 643 | del ip._x |
|
633 | 644 | |
|
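`IPCompleter.omit__names` is the configurable being exercised above: 0 shows every attribute, 1 hides dunder names, 2 hides anything starting with an underscore. A hedged configuration sketch:

    from traitlets.config import Config

    ip = get_ipython()
    ip.ex("ip = get_ipython()")          # make `ip` visible to the completer
    ip.Completer.use_jedi = False

    cfg = Config()
    cfg.IPCompleter.omit__names = 2      # hide both `ip.__str__` and `ip._hidden_attr`
    ip.Completer.update_config(cfg)

    _, matches = ip.Completer.complete("ip.")
    # nothing starting with "ip._" should be offered at level 2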
634 | 645 | def test_limit_to__all__False_ok(self): |
|
635 | 646 | """ |
|
636 | 647 | Limit to all is deprecated, once we remove it this test can go away. |
|
637 | 648 | """ |
|
638 | 649 | ip = get_ipython() |
|
639 | 650 | c = ip.Completer |
|
640 | 651 | c.use_jedi = False |
|
641 | 652 | ip.ex("class D: x=24") |
|
642 | 653 | ip.ex("d=D()") |
|
643 | 654 | cfg = Config() |
|
644 | 655 | cfg.IPCompleter.limit_to__all__ = False |
|
645 | 656 | c.update_config(cfg) |
|
646 | 657 | s, matches = c.complete("d.") |
|
647 | 658 | self.assertIn("d.x", matches) |
|
648 | 659 | |
|
649 | 660 | def test_get__all__entries_ok(self): |
|
650 | 661 | class A: |
|
651 | 662 | __all__ = ["x", 1] |
|
652 | 663 | |
|
653 | 664 | words = completer.get__all__entries(A()) |
|
654 | 665 | self.assertEqual(words, ["x"]) |
|
655 | 666 | |
|
656 | 667 | def test_get__all__entries_no__all__ok(self): |
|
657 | 668 | class A: |
|
658 | 669 | pass |
|
659 | 670 | |
|
660 | 671 | words = completer.get__all__entries(A()) |
|
661 | 672 | self.assertEqual(words, []) |
|
662 | 673 | |
|
663 | 674 | def test_func_kw_completions(self): |
|
664 | 675 | ip = get_ipython() |
|
665 | 676 | c = ip.Completer |
|
666 | 677 | c.use_jedi = False |
|
667 | 678 | ip.ex("def myfunc(a=1,b=2): return a+b") |
|
668 | 679 | s, matches = c.complete(None, "myfunc(1,b") |
|
669 | 680 | self.assertIn("b=", matches) |
|
670 | 681 | # Simulate completing with cursor right after b (pos==10): |
|
671 | 682 | s, matches = c.complete(None, "myfunc(1,b)", 10) |
|
672 | 683 | self.assertIn("b=", matches) |
|
673 | 684 | s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b') |
|
674 | 685 | self.assertIn("b=", matches) |
|
675 | 686 | # builtin function |
|
676 | 687 | s, matches = c.complete(None, "min(k, k") |
|
677 | 688 | self.assertIn("key=", matches) |
|
678 | 689 | |
|
679 | 690 | def test_default_arguments_from_docstring(self): |
|
680 | 691 | ip = get_ipython() |
|
681 | 692 | c = ip.Completer |
|
682 | 693 | kwd = c._default_arguments_from_docstring("min(iterable[, key=func]) -> value") |
|
683 | 694 | self.assertEqual(kwd, ["key"]) |
|
684 | 695 | # with cython type etc |
|
685 | 696 | kwd = c._default_arguments_from_docstring( |
|
686 | 697 | "Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n" |
|
687 | 698 | ) |
|
688 | 699 | self.assertEqual(kwd, ["ncall", "resume", "nsplit"]) |
|
689 | 700 | # white spaces |
|
690 | 701 | kwd = c._default_arguments_from_docstring( |
|
691 | 702 | "\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n" |
|
692 | 703 | ) |
|
693 | 704 | self.assertEqual(kwd, ["ncall", "resume", "nsplit"]) |
|
694 | 705 | |
|
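The two tests above cover keyword-argument completion: inside an open call the completer proposes `name=` candidates taken from the signature (or, for builtins and Cython functions, parsed out of the docstring). A short sketch:

    ip = get_ipython()
    ip.ex("def myfunc(a=1, b=2): return a + b")

    _, matches = ip.Completer.complete(None, "myfunc(1, b")
    # "b=" is offered because `b` is a parameter of myfunc
    assert "b=" in matches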
695 | 706 | def test_line_magics(self): |
|
696 | 707 | ip = get_ipython() |
|
697 | 708 | c = ip.Completer |
|
698 | 709 | s, matches = c.complete(None, "lsmag") |
|
699 | 710 | self.assertIn("%lsmagic", matches) |
|
700 | 711 | s, matches = c.complete(None, "%lsmag") |
|
701 | 712 | self.assertIn("%lsmagic", matches) |
|
702 | 713 | |
|
703 | 714 | def test_cell_magics(self): |
|
704 | 715 | from IPython.core.magic import register_cell_magic |
|
705 | 716 | |
|
706 | 717 | @register_cell_magic |
|
707 | 718 | def _foo_cellm(line, cell): |
|
708 | 719 | pass |
|
709 | 720 | |
|
710 | 721 | ip = get_ipython() |
|
711 | 722 | c = ip.Completer |
|
712 | 723 | |
|
713 | 724 | s, matches = c.complete(None, "_foo_ce") |
|
714 | 725 | self.assertIn("%%_foo_cellm", matches) |
|
715 | 726 | s, matches = c.complete(None, "%%_foo_ce") |
|
716 | 727 | self.assertIn("%%_foo_cellm", matches) |
|
717 | 728 | |
|
718 | 729 | def test_line_cell_magics(self): |
|
719 | 730 | from IPython.core.magic import register_line_cell_magic |
|
720 | 731 | |
|
721 | 732 | @register_line_cell_magic |
|
722 | 733 | def _bar_cellm(line, cell): |
|
723 | 734 | pass |
|
724 | 735 | |
|
725 | 736 | ip = get_ipython() |
|
726 | 737 | c = ip.Completer |
|
727 | 738 | |
|
728 | 739 | # The policy here is trickier, see comments in completion code. The |
|
729 | 740 | # returned values depend on whether the user passes %% or not explicitly, |
|
730 | 741 | # and this will show a difference if the same name is both a line and cell |
|
731 | 742 | # magic. |
|
732 | 743 | s, matches = c.complete(None, "_bar_ce") |
|
733 | 744 | self.assertIn("%_bar_cellm", matches) |
|
734 | 745 | self.assertIn("%%_bar_cellm", matches) |
|
735 | 746 | s, matches = c.complete(None, "%_bar_ce") |
|
736 | 747 | self.assertIn("%_bar_cellm", matches) |
|
737 | 748 | self.assertIn("%%_bar_cellm", matches) |
|
738 | 749 | s, matches = c.complete(None, "%%_bar_ce") |
|
739 | 750 | self.assertNotIn("%_bar_cellm", matches) |
|
740 | 751 | self.assertIn("%%_bar_cellm", matches) |
|
741 | 752 | |
|
742 | 753 | def test_magic_completion_order(self): |
|
743 | 754 | ip = get_ipython() |
|
744 | 755 | c = ip.Completer |
|
745 | 756 | |
|
746 | 757 | # Test ordering of line and cell magics. |
|
747 | 758 | text, matches = c.complete("timeit") |
|
748 | 759 | self.assertEqual(matches, ["%timeit", "%%timeit"]) |
|
749 | 760 | |
|
750 | 761 | def test_magic_completion_shadowing(self): |
|
751 | 762 | ip = get_ipython() |
|
752 | 763 | c = ip.Completer |
|
753 | 764 | c.use_jedi = False |
|
754 | 765 | |
|
755 | 766 | # Before importing matplotlib, %matplotlib magic should be the only option. |
|
756 | 767 | text, matches = c.complete("mat") |
|
757 | 768 | self.assertEqual(matches, ["%matplotlib"]) |
|
758 | 769 | |
|
759 | 770 | # The newly introduced name should shadow the magic. |
|
760 | 771 | ip.run_cell("matplotlib = 1") |
|
761 | 772 | text, matches = c.complete("mat") |
|
762 | 773 | self.assertEqual(matches, ["matplotlib"]) |
|
763 | 774 | |
|
764 | 775 | # After removing matplotlib from namespace, the magic should again be |
|
765 | 776 | # the only option. |
|
766 | 777 | del ip.user_ns["matplotlib"] |
|
767 | 778 | text, matches = c.complete("mat") |
|
768 | 779 | self.assertEqual(matches, ["%matplotlib"]) |
|
769 | 780 | |
|
770 | 781 | def test_magic_completion_shadowing_explicit(self): |
|
771 | 782 | """ |
|
772 | 783 | If the user try to complete a shadowed magic, and explicit % start should |
|
773 | 784 | still return the completions. |
|
774 | 785 | """ |
|
775 | 786 | ip = get_ipython() |
|
776 | 787 | c = ip.Completer |
|
777 | 788 | |
|
778 | 789 | # Before importing matplotlib, %matplotlib magic should be the only option. |
|
779 | 790 | text, matches = c.complete("%mat") |
|
780 | 791 | self.assertEqual(matches, ["%matplotlib"]) |
|
781 | 792 | |
|
782 | 793 | ip.run_cell("matplotlib = 1") |
|
783 | 794 | |
|
784 | 795 | # After removing matplotlib from namespace, the magic should still be |
|
785 | 796 | # the only option. |
|
786 | 797 | text, matches = c.complete("%mat") |
|
787 | 798 | self.assertEqual(matches, ["%matplotlib"]) |
|
788 | 799 | |
|
789 | 800 | def test_magic_config(self): |
|
790 | 801 | ip = get_ipython() |
|
791 | 802 | c = ip.Completer |
|
792 | 803 | |
|
793 | 804 | s, matches = c.complete(None, "conf") |
|
794 | 805 | self.assertIn("%config", matches) |
|
795 | 806 | s, matches = c.complete(None, "conf") |
|
796 | 807 | self.assertNotIn("AliasManager", matches) |
|
797 | 808 | s, matches = c.complete(None, "config ") |
|
798 | 809 | self.assertIn("AliasManager", matches) |
|
799 | 810 | s, matches = c.complete(None, "%config ") |
|
800 | 811 | self.assertIn("AliasManager", matches) |
|
801 | 812 | s, matches = c.complete(None, "config Ali") |
|
802 | 813 | self.assertListEqual(["AliasManager"], matches) |
|
803 | 814 | s, matches = c.complete(None, "%config Ali") |
|
804 | 815 | self.assertListEqual(["AliasManager"], matches) |
|
805 | 816 | s, matches = c.complete(None, "config AliasManager") |
|
806 | 817 | self.assertListEqual(["AliasManager"], matches) |
|
807 | 818 | s, matches = c.complete(None, "%config AliasManager") |
|
808 | 819 | self.assertListEqual(["AliasManager"], matches) |
|
809 | 820 | s, matches = c.complete(None, "config AliasManager.") |
|
810 | 821 | self.assertIn("AliasManager.default_aliases", matches) |
|
811 | 822 | s, matches = c.complete(None, "%config AliasManager.") |
|
812 | 823 | self.assertIn("AliasManager.default_aliases", matches) |
|
813 | 824 | s, matches = c.complete(None, "config AliasManager.de") |
|
814 | 825 | self.assertListEqual(["AliasManager.default_aliases"], matches) |
|
815 | 826 | s, matches = c.complete(None, "config AliasManager.de") |
|
816 | 827 | self.assertListEqual(["AliasManager.default_aliases"], matches) |
|
817 | 828 | |
|
818 | 829 | def test_magic_color(self): |
|
819 | 830 | ip = get_ipython() |
|
820 | 831 | c = ip.Completer |
|
821 | 832 | |
|
822 | 833 | s, matches = c.complete(None, "colo") |
|
823 | 834 | self.assertIn("%colors", matches) |
|
824 | 835 | s, matches = c.complete(None, "colo") |
|
825 | 836 | self.assertNotIn("NoColor", matches) |
|
826 | 837 | s, matches = c.complete(None, "%colors") # No trailing space |
|
827 | 838 | self.assertNotIn("NoColor", matches) |
|
828 | 839 | s, matches = c.complete(None, "colors ") |
|
829 | 840 | self.assertIn("NoColor", matches) |
|
830 | 841 | s, matches = c.complete(None, "%colors ") |
|
831 | 842 | self.assertIn("NoColor", matches) |
|
832 | 843 | s, matches = c.complete(None, "colors NoCo") |
|
833 | 844 | self.assertListEqual(["NoColor"], matches) |
|
834 | 845 | s, matches = c.complete(None, "%colors NoCo") |
|
835 | 846 | self.assertListEqual(["NoColor"], matches) |
|
836 | 847 | |
|
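The magic-related tests above check that line magics complete with a single `%`, cell magics with `%%`, and that an ambiguous name yields both spellings. A sketch of the expected shape:

    ip = get_ipython()

    text, matches = ip.Completer.complete("timeit")
    # %timeit exists both as a line and as a cell magic, so both forms are offered:
    assert "%timeit" in matches and "%%timeit" in matches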
837 | 848 | def test_match_dict_keys(self): |
|
838 | 849 | """ |
|
839 | 850 | Test that match_dict_keys works on a couple of use case does return what |
|
840 | 851 | expected, and does not crash |
|
841 | 852 | """ |
|
842 | 853 | delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?" |
|
843 | 854 | |
|
844 | keys = ["foo", b"far"] | |
|
845 | assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2, ["far"]) |
|
846 | assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2, ["far"]) | |
|
847 | assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2, ["far"]) | |
|
848 | assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2, ["far"]) | |
|
849 | ||
|
850 | assert match_dict_keys(keys, "'", delims=delims) == ("'", 1, ["foo"]) | |
|
851 | assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1, ["foo"]) | |
|
852 | assert match_dict_keys(keys, '"', delims=delims) == ('"', 1, ["foo"]) | |
|
853 | assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1, ["foo"]) | |
|
855 | def match(*args, **kwargs): | |
|
856 | quote, offset, matches = match_dict_keys(*args, delims=delims, **kwargs) | |
|
857 | return quote, offset, list(matches) | |
|
854 | 858 | |
|
855 | match_dict_keys | |
|
859 | keys = ["foo", b"far"] | |
|
860 | assert match(keys, "b'") == ("'", 2, ["far"]) | |
|
861 | assert match(keys, "b'f") == ("'", 2, ["far"]) | |
|
862 | assert match(keys, 'b"') == ('"', 2, ["far"]) | |
|
863 | assert match(keys, 'b"f') == ('"', 2, ["far"]) | |
|
864 | ||
|
865 | assert match(keys, "'") == ("'", 1, ["foo"]) | |
|
866 | assert match(keys, "'f") == ("'", 1, ["foo"]) | |
|
867 | assert match(keys, '"') == ('"', 1, ["foo"]) | |
|
868 | assert match(keys, '"f') == ('"', 1, ["foo"]) | |
|
869 | ||
|
870 | # Completion on first item of tuple | |
|
871 | keys = [("foo", 1111), ("foo", 2222), (3333, "bar"), (3333, "test")] | |
|
872 | assert match(keys, "'f") == ("'", 1, ["foo"]) | |
|
873 | assert match(keys, "33") == ("", 0, ["3333"]) | |
|
874 | ||
|
875 | # Completion on numbers | |
|
876 | keys = [ | |
|
877 | 0xDEADBEEF, | |
|
878 | 1111, | |
|
879 | 1234, | |
|
880 | "1999", | |
|
881 | 0b10101, | |
|
882 | 22, | |
|
883 | ] # 0xDEADBEEF = 3735928559; 0b10101 = 21 | |
|
884 | assert match(keys, "0xdead") == ("", 0, ["0xdeadbeef"]) | |
|
885 | assert match(keys, "1") == ("", 0, ["1111", "1234"]) | |
|
886 | assert match(keys, "2") == ("", 0, ["21", "22"]) | |
|
887 | assert match(keys, "0b101") == ("", 0, ["0b10101", "0b10110"]) | |
|
888 | ||
|
889 | # Should yield on variables | |
|
890 | assert match(keys, "a_variable") == ("", 0, []) | |
|
891 | ||
|
892 | # Should pass over invalid literals | |
|
893 | assert match(keys, "'' ''") == ("", 0, []) | |
|
856 | 894 | |
|
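`match_dict_keys()` is the low-level helper being refactored here; it takes the candidate keys and the partially typed key text and returns the quote to use, the offset where the typed prefix starts, and the matching key representations. A hedged sketch mirroring the new `match()` wrapper above:

    from IPython.core.completer import match_dict_keys

    delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
    keys = ["foo", b"far"]

    quote, offset, matches = match_dict_keys(keys, "'f", delims=delims)
    # the key is re-quoted with ' and the typed prefix starts one character in
    assert (quote, offset, list(matches)) == ("'", 1, ["foo"])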
857 | 895 | def test_match_dict_keys_tuple(self): |
|
858 | 896 | """ |
|
859 | 897 | Test that match_dict_keys called with extra prefix works on a couple of use case, |
|
860 | 898 | does return what expected, and does not crash. |
|
861 | 899 | """ |
|
862 | 900 | delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?" |
|
863 | 901 | |
|
864 | 902 | keys = [("foo", "bar"), ("foo", "oof"), ("foo", b"bar"), ('other', 'test')] |
|
865 | 903 | |
|
904 | def match(*args, extra=None, **kwargs): | |
|
905 | quote, offset, matches = match_dict_keys( | |
|
906 | *args, delims=delims, extra_prefix=extra, **kwargs | |
|
907 | ) | |
|
908 | return quote, offset, list(matches) | |
|
909 | ||
|
866 | 910 | # Completion on first key == "foo" |
|
867 | assert match_dict_keys(keys, "'", delims=delims, extra_prefix=('foo',)) == ("'", 1, ["bar", "oof"]) |
|
868 | assert match_dict_keys(keys, '"', delims=delims, extra_prefix=('foo',)) == ('"', 1, ["bar", "oof"]) |
|
869 | assert match_dict_keys(keys, "'o", delims=delims, extra_prefix=('foo',)) == ("'", 1, ["oof"]) |
|
870 | assert match_dict_keys(keys, '"o', delims=delims, extra_prefix=('foo',)) == ('"', 1, ["oof"]) |
|
871 | assert match_dict_keys(keys, "b'", delims=delims, extra_prefix=('foo',)) == ("'", 2, ["bar"]) |
|
872 | assert match_dict_keys(keys, 'b"', delims=delims, extra_prefix=('foo',)) == ('"', 2, ["bar"]) |
|
873 | assert match_dict_keys(keys, "b'b", delims=delims, extra_prefix=('foo',)) == ("'", 2, ["bar"]) |
|
874 | assert match_dict_keys(keys, 'b"b', delims=delims, extra_prefix=('foo',)) == ('"', 2, ["bar"]) |
|
911 | assert match(keys, "'", extra=("foo",)) == ("'", 1, ["bar", "oof"]) | |
|
912 | assert match(keys, '"', extra=("foo",)) == ('"', 1, ["bar", "oof"]) | |
|
913 | assert match(keys, "'o", extra=("foo",)) == ("'", 1, ["oof"]) | |
|
914 | assert match(keys, '"o', extra=("foo",)) == ('"', 1, ["oof"]) | |
|
915 | assert match(keys, "b'", extra=("foo",)) == ("'", 2, ["bar"]) | |
|
916 | assert match(keys, 'b"', extra=("foo",)) == ('"', 2, ["bar"]) | |
|
917 | assert match(keys, "b'b", extra=("foo",)) == ("'", 2, ["bar"]) | |
|
918 | assert match(keys, 'b"b', extra=("foo",)) == ('"', 2, ["bar"]) | |
|
875 | 919 | |
|
876 | 920 | # No Completion |
|
877 | assert match_dict_keys(keys, "'", delims=delims, extra_prefix=('no_foo',)) == ("'", 1, []) |
|
878 | assert match_dict_keys(keys, "'", delims=delims, extra_prefix=('fo',)) == ("'", 1, []) |
|
921 | assert match(keys, "'", extra=("no_foo",)) == ("'", 1, []) | |
|
922 | assert match(keys, "'", extra=("fo",)) == ("'", 1, []) | |
|
923 | ||
|
924 | keys = [("foo1", "foo2", "foo3", "foo4"), ("foo1", "foo2", "bar", "foo4")] | |
|
925 | assert match(keys, "'foo", extra=("foo1",)) == ("'", 1, ["foo2"]) | |
|
926 | assert match(keys, "'foo", extra=("foo1", "foo2")) == ("'", 1, ["foo3"]) | |
|
927 | assert match(keys, "'foo", extra=("foo1", "foo2", "foo3")) == ("'", 1, ["foo4"]) | |
|
928 | assert match(keys, "'foo", extra=("foo1", "foo2", "foo3", "foo4")) == ( | |
|
929 | "'", | |
|
930 | 1, | |
|
931 | [], | |
|
932 | ) | |
|
933 | ||
|
934 | keys = [("foo", 1111), ("foo", "2222"), (3333, "bar"), (3333, 4444)] | |
|
935 | assert match(keys, "'", extra=("foo",)) == ("'", 1, ["2222"]) | |
|
936 | assert match(keys, "", extra=("foo",)) == ("", 0, ["1111", "'2222'"]) | |
|
937 | assert match(keys, "'", extra=(3333,)) == ("'", 1, ["bar"]) | |
|
938 | assert match(keys, "", extra=(3333,)) == ("", 0, ["'bar'", "4444"]) | |
|
939 | assert match(keys, "'", extra=("3333",)) == ("'", 1, []) | |
|
940 | assert match(keys, "33") == ("", 0, ["3333"]) | |
|
941 | ||
|
942 | def test_dict_key_completion_closures(self): | |
|
943 | ip = get_ipython() | |
|
944 | complete = ip.Completer.complete | |
|
945 | ip.Completer.auto_close_dict_keys = True | |
|
946 | ||
|
947 | ip.user_ns["d"] = { | |
|
948 | # tuple only | |
|
949 | ("aa", 11): None, | |
|
950 | # tuple and non-tuple | |
|
951 | ("bb", 22): None, | |
|
952 | "bb": None, | |
|
953 | # non-tuple only | |
|
954 | "cc": None, | |
|
955 | # numeric tuple only | |
|
956 | (77, "x"): None, | |
|
957 | # numeric tuple and non-tuple | |
|
958 | (88, "y"): None, | |
|
959 | 88: None, | |
|
960 | # numeric non-tuple only | |
|
961 | 99: None, | |
|
962 | } | |
|
879 | 963 | |
|
880 | keys = [('foo1', 'foo2', 'foo3', 'foo4'), ('foo1', 'foo2', 'bar', 'foo4')] | |
|
881 | assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1',)) == ("'", 1, ["foo2", "foo2"]) | |
|
882 | assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2')) == ("'", 1, ["foo3"]) | |
|
883 | assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3')) == ("'", 1, ["foo4"]) | |
|
884 | assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3', 'foo4')) == ("'", 1, []) | |
|
964 | _, matches = complete(line_buffer="d[") | |
|
965 | # should append `, ` if matches a tuple only | |
|
966 | self.assertIn("'aa', ", matches) | |
|
967 | # should not append anything if matches a tuple and an item | |
|
968 | self.assertIn("'bb'", matches) | |
|
969 | # should append `]` if matches and item only | |
|
970 | self.assertIn("'cc']", matches) | |
|
971 | ||
|
972 | # should append `, ` if matches a tuple only | |
|
973 | self.assertIn("77, ", matches) | |
|
974 | # should not append anything if matches a tuple and an item | |
|
975 | self.assertIn("88", matches) | |
|
976 | # should append `]` if matches and item only | |
|
977 | self.assertIn("99]", matches) | |
|
978 | ||
|
979 | _, matches = complete(line_buffer="d['aa', ") | |
|
980 | # should restrict matches to those matching tuple prefix | |
|
981 | self.assertIn("11]", matches) | |
|
982 | self.assertNotIn("'bb'", matches) | |
|
983 | self.assertNotIn("'bb', ", matches) | |
|
984 | self.assertNotIn("'bb']", matches) | |
|
985 | self.assertNotIn("'cc'", matches) | |
|
986 | self.assertNotIn("'cc', ", matches) | |
|
987 | self.assertNotIn("'cc']", matches) | |
|
988 | ip.Completer.auto_close_dict_keys = False | |
|
885 | 989 | |
|
886 | 990 | def test_dict_key_completion_string(self): |
|
887 | 991 | """Test dictionary key completion for string keys""" |
|
888 | 992 | ip = get_ipython() |
|
889 | 993 | complete = ip.Completer.complete |
|
890 | 994 | |
|
891 | 995 | ip.user_ns["d"] = {"abc": None} |
|
892 | 996 | |
|
893 | 997 | # check completion at different stages |
|
894 | 998 | _, matches = complete(line_buffer="d[") |
|
895 | 999 | self.assertIn("'abc'", matches) |
|
896 | 1000 | self.assertNotIn("'abc']", matches) |
|
897 | 1001 | |
|
898 | 1002 | _, matches = complete(line_buffer="d['") |
|
899 | 1003 | self.assertIn("abc", matches) |
|
900 | 1004 | self.assertNotIn("abc']", matches) |
|
901 | 1005 | |
|
902 | 1006 | _, matches = complete(line_buffer="d['a") |
|
903 | 1007 | self.assertIn("abc", matches) |
|
904 | 1008 | self.assertNotIn("abc']", matches) |
|
905 | 1009 | |
|
906 | 1010 | # check use of different quoting |
|
907 | 1011 | _, matches = complete(line_buffer='d["') |
|
908 | 1012 | self.assertIn("abc", matches) |
|
909 | 1013 | self.assertNotIn('abc"]', matches) |
|
910 | 1014 | |
|
911 | 1015 | _, matches = complete(line_buffer='d["a') |
|
912 | 1016 | self.assertIn("abc", matches) |
|
913 | 1017 | self.assertNotIn('abc"]', matches) |
|
914 | 1018 | |
|
915 | 1019 | # check sensitivity to following context |
|
916 | 1020 | _, matches = complete(line_buffer="d[]", cursor_pos=2) |
|
917 | 1021 | self.assertIn("'abc'", matches) |
|
918 | 1022 | |
|
919 | 1023 | _, matches = complete(line_buffer="d['']", cursor_pos=3) |
|
920 | 1024 | self.assertIn("abc", matches) |
|
921 | 1025 | self.assertNotIn("abc'", matches) |
|
922 | 1026 | self.assertNotIn("abc']", matches) |
|
923 | 1027 | |
|
924 | 1028 | # check multiple solutions are correctly returned and that noise is not |
|
925 | 1029 | ip.user_ns["d"] = { |
|
926 | 1030 | "abc": None, |
|
927 | 1031 | "abd": None, |
|
928 | 1032 | "bad": None, |
|
929 | 1033 | object(): None, |
|
930 | 1034 | 5: None, |
|
931 | 1035 | ("abe", None): None, |
|
932 | 1036 | (None, "abf"): None |
|
933 | 1037 | } |
|
934 | 1038 | |
|
935 | 1039 | _, matches = complete(line_buffer="d['a") |
|
936 | 1040 | self.assertIn("abc", matches) |
|
937 | 1041 | self.assertIn("abd", matches) |
|
938 | 1042 | self.assertNotIn("bad", matches) |
|
939 | 1043 | self.assertNotIn("abe", matches) |
|
940 | 1044 | self.assertNotIn("abf", matches) |
|
941 | 1045 | assert not any(m.endswith(("]", '"', "'")) for m in matches), matches |
|
942 | 1046 | |
|
943 | 1047 | # check escaping and whitespace |
|
944 | 1048 | ip.user_ns["d"] = {"a\nb": None, "a'b": None, 'a"b': None, "a word": None} |
|
945 | 1049 | _, matches = complete(line_buffer="d['a") |
|
946 | 1050 | self.assertIn("a\\nb", matches) |
|
947 | 1051 | self.assertIn("a\\'b", matches) |
|
948 | 1052 | self.assertIn('a"b', matches) |
|
949 | 1053 | self.assertIn("a word", matches) |
|
950 | 1054 | assert not any(m.endswith(("]", '"', "'")) for m in matches), matches |
|
951 | 1055 | |
|
952 | 1056 | # - can complete on non-initial word of the string |
|
953 | 1057 | _, matches = complete(line_buffer="d['a w") |
|
954 | 1058 | self.assertIn("word", matches) |
|
955 | 1059 | |
|
956 | 1060 | # - understands quote escaping |
|
957 | 1061 | _, matches = complete(line_buffer="d['a\\'") |
|
958 | 1062 | self.assertIn("b", matches) |
|
959 | 1063 | |
|
960 | 1064 | # - default quoting should work like repr |
|
961 | 1065 | _, matches = complete(line_buffer="d[") |
|
962 | 1066 | self.assertIn('"a\'b"', matches) |
|
963 | 1067 | |
|
964 | 1068 | # - when opening quote with ", possible to match with unescaped apostrophe |
|
965 | 1069 | _, matches = complete(line_buffer="d[\"a'") |
|
966 | 1070 | self.assertIn("b", matches) |
|
967 | 1071 | |
|
968 | 1072 | # need to not split at delims that readline won't split at |
|
969 | 1073 | if "-" not in ip.Completer.splitter.delims: |
|
970 | 1074 | ip.user_ns["d"] = {"before-after": None} |
|
971 | 1075 | _, matches = complete(line_buffer="d['before-af") |
|
972 | 1076 | self.assertIn("before-after", matches) |
|
973 | 1077 | |
|
974 | 1078 | # check completion on tuple-of-string keys at different stage - on first key |
|
975 | 1079 | ip.user_ns["d"] = {('foo', 'bar'): None} |
|
976 | 1080 | _, matches = complete(line_buffer="d[") |
|
977 | 1081 | self.assertIn("'foo'", matches) |
|
978 | 1082 | self.assertNotIn("'foo']", matches) |
|
979 | 1083 | self.assertNotIn("'bar'", matches) |
|
980 | 1084 | self.assertNotIn("foo", matches) |
|
981 | 1085 | self.assertNotIn("bar", matches) |
|
982 | 1086 | |
|
983 | 1087 | # - match the prefix |
|
984 | 1088 | _, matches = complete(line_buffer="d['f") |
|
985 | 1089 | self.assertIn("foo", matches) |
|
986 | 1090 | self.assertNotIn("foo']", matches) |
|
987 | 1091 | self.assertNotIn('foo"]', matches) |
|
988 | 1092 | _, matches = complete(line_buffer="d['foo") |
|
989 | 1093 | self.assertIn("foo", matches) |
|
990 | 1094 | |
|
991 | 1095 | # - can complete on second key |
|
992 | 1096 | _, matches = complete(line_buffer="d['foo', ") |
|
993 | 1097 | self.assertIn("'bar'", matches) |
|
994 | 1098 | _, matches = complete(line_buffer="d['foo', 'b") |
|
995 | 1099 | self.assertIn("bar", matches) |
|
996 | 1100 | self.assertNotIn("foo", matches) |
|
997 | 1101 | |
|
998 | 1102 | # - does not propose missing keys |
|
999 | 1103 | _, matches = complete(line_buffer="d['foo', 'f") |
|
1000 | 1104 | self.assertNotIn("bar", matches) |
|
1001 | 1105 | self.assertNotIn("foo", matches) |
|
1002 | 1106 | |
|
1003 | 1107 | # check sensitivity to following context |
|
1004 | 1108 | _, matches = complete(line_buffer="d['foo',]", cursor_pos=8) |
|
1005 | 1109 | self.assertIn("'bar'", matches) |
|
1006 | 1110 | self.assertNotIn("bar", matches) |
|
1007 | 1111 | self.assertNotIn("'foo'", matches) |
|
1008 | 1112 | self.assertNotIn("foo", matches) |
|
1009 | 1113 | |
|
1010 | 1114 | _, matches = complete(line_buffer="d['']", cursor_pos=3) |
|
1011 | 1115 | self.assertIn("foo", matches) |
|
1012 | 1116 | assert not any(m.endswith(("]", '"', "'")) for m in matches), matches |
|
1013 | 1117 | |
|
1014 | 1118 | _, matches = complete(line_buffer='d[""]', cursor_pos=3) |
|
1015 | 1119 | self.assertIn("foo", matches) |
|
1016 | 1120 | assert not any(m.endswith(("]", '"', "'")) for m in matches), matches |
|
1017 | 1121 | |
|
1018 | 1122 | _, matches = complete(line_buffer='d["foo","]', cursor_pos=9) |
|
1019 | 1123 | self.assertIn("bar", matches) |
|
1020 | 1124 | assert not any(m.endswith(("]", '"', "'")) for m in matches), matches |
|
1021 | 1125 | |
|
1022 | 1126 | _, matches = complete(line_buffer='d["foo",]', cursor_pos=8) |
|
1023 | 1127 | self.assertIn("'bar'", matches) |
|
1024 | 1128 | self.assertNotIn("bar", matches) |
|
1025 | 1129 | |
|
1026 | 1130 | # Can complete with longer tuple keys |
|
1027 | 1131 | ip.user_ns["d"] = {('foo', 'bar', 'foobar'): None} |
|
1028 | 1132 | |
|
1029 | 1133 | # - can complete second key |
|
1030 | 1134 | _, matches = complete(line_buffer="d['foo', 'b") |
|
1031 | 1135 | self.assertIn("bar", matches) |
|
1032 | 1136 | self.assertNotIn("foo", matches) |
|
1033 | 1137 | self.assertNotIn("foobar", matches) |
|
1034 | 1138 | |
|
1035 | 1139 | # - can complete third key |
|
1036 | 1140 | _, matches = complete(line_buffer="d['foo', 'bar', 'fo") |
|
1037 | 1141 | self.assertIn("foobar", matches) |
|
1038 | 1142 | self.assertNotIn("foo", matches) |
|
1039 | 1143 | self.assertNotIn("bar", matches) |
|
1040 | 1144 | |
|
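The long test above pins down the user-visible behaviour of dict-key completion: right after `d[` the completer offers quoted keys, while after an opening quote it offers the bare key text and never adds a closing quote or bracket that is already present. A condensed sketch:

    ip = get_ipython()
    ip.user_ns["d"] = {"abc": None, "abd": None}

    _, matches = ip.Completer.complete(line_buffer="d[")
    # quoted candidates such as "'abc'" are offered here

    _, matches = ip.Completer.complete(line_buffer="d['a")
    # the quote is already typed, so the bare keys are offered instead
    assert "abc" in matches and "abd" in matches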
1145 | def test_dict_key_completion_numbers(self): | |
|
1146 | ip = get_ipython() | |
|
1147 | complete = ip.Completer.complete | |
|
1148 | ||
|
1149 | ip.user_ns["d"] = { | |
|
1150 | 0xDEADBEEF: None, # 3735928559 | |
|
1151 | 1111: None, | |
|
1152 | 1234: None, | |
|
1153 | "1999": None, | |
|
1154 | 0b10101: None, # 21 | |
|
1155 | 22: None, | |
|
1156 | } | |
|
1157 | _, matches = complete(line_buffer="d[1") | |
|
1158 | self.assertIn("1111", matches) | |
|
1159 | self.assertIn("1234", matches) | |
|
1160 | self.assertNotIn("1999", matches) | |
|
1161 | self.assertNotIn("'1999'", matches) | |
|
1162 | ||
|
1163 | _, matches = complete(line_buffer="d[0xdead") | |
|
1164 | self.assertIn("0xdeadbeef", matches) | |
|
1165 | ||
|
1166 | _, matches = complete(line_buffer="d[2") | |
|
1167 | self.assertIn("21", matches) | |
|
1168 | self.assertIn("22", matches) | |
|
1169 | ||
|
1170 | _, matches = complete(line_buffer="d[0b101") | |
|
1171 | self.assertIn("0b10101", matches) | |
|
1172 | self.assertIn("0b10110", matches) | |
|
1173 | ||
|
1041 | 1174 | def test_dict_key_completion_contexts(self): |
|
1042 | 1175 | """Test expression contexts in which dict key completion occurs""" |
|
1043 | 1176 | ip = get_ipython() |
|
1044 | 1177 | complete = ip.Completer.complete |
|
1045 | 1178 | d = {"abc": None} |
|
1046 | 1179 | ip.user_ns["d"] = d |
|
1047 | 1180 | |
|
1048 | 1181 | class C: |
|
1049 | 1182 | data = d |
|
1050 | 1183 | |
|
1051 | 1184 | ip.user_ns["C"] = C |
|
1052 | 1185 | ip.user_ns["get"] = lambda: d |
|
1186 | ip.user_ns["nested"] = {"x": d} | |
|
1053 | 1187 | |
|
1054 | 1188 | def assert_no_completion(**kwargs): |
|
1055 | 1189 | _, matches = complete(**kwargs) |
|
1056 | 1190 | self.assertNotIn("abc", matches) |
|
1057 | 1191 | self.assertNotIn("abc'", matches) |
|
1058 | 1192 | self.assertNotIn("abc']", matches) |
|
1059 | 1193 | self.assertNotIn("'abc'", matches) |
|
1060 | 1194 | self.assertNotIn("'abc']", matches) |
|
1061 | 1195 | |
|
1062 | 1196 | def assert_completion(**kwargs): |
|
1063 | 1197 | _, matches = complete(**kwargs) |
|
1064 | 1198 | self.assertIn("'abc'", matches) |
|
1065 | 1199 | self.assertNotIn("'abc']", matches) |
|
1066 | 1200 | |
|
1067 | 1201 | # no completion after string closed, even if reopened |
|
1068 | 1202 | assert_no_completion(line_buffer="d['a'") |
|
1069 | 1203 | assert_no_completion(line_buffer='d["a"') |
|
1070 | 1204 | assert_no_completion(line_buffer="d['a' + ") |
|
1071 | 1205 | assert_no_completion(line_buffer="d['a' + '") |
|
1072 | 1206 | |
|
1073 | 1207 | # completion in non-trivial expressions |
|
1074 | 1208 | assert_completion(line_buffer="+ d[") |
|
1075 | 1209 | assert_completion(line_buffer="(d[") |
|
1076 | 1210 | assert_completion(line_buffer="C.data[") |
|
1077 | 1211 | |
|
1212 | # nested dict completion | |
|
1213 | assert_completion(line_buffer="nested['x'][") | |
|
1214 | ||
|
1215 | with evaluation_policy("minimal"): | |
|
1216 | with pytest.raises(AssertionError): | |
|
1217 | assert_completion(line_buffer="nested['x'][") | |
|
1218 | ||
|
1078 | 1219 | # greedy flag |
|
1079 | 1220 | def assert_completion(**kwargs): |
|
1080 | 1221 | _, matches = complete(**kwargs) |
|
1081 | 1222 | self.assertIn("get()['abc']", matches) |
|
1082 | 1223 | |
|
1083 | 1224 | assert_no_completion(line_buffer="get()[") |
|
1084 | 1225 | with greedy_completion(): |
|
1085 | 1226 | assert_completion(line_buffer="get()[") |
|
1086 | 1227 | assert_completion(line_buffer="get()['") |
|
1087 | 1228 | assert_completion(line_buffer="get()['a") |
|
1088 | 1229 | assert_completion(line_buffer="get()['ab") |
|
1089 | 1230 | assert_completion(line_buffer="get()['abc") |
|
1090 | 1231 | |
|
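The `evaluation_policy(...)` context manager used above is a test helper that temporarily switches how much the completer is allowed to evaluate while resolving expressions such as `nested['x'][`. A heavily hedged sketch of the idea, assuming the completer exposes this switch as an `evaluation` option (that attribute name is an assumption, not confirmed by this diff):

    ip = get_ipython()
    ip.user_ns["nested"] = {"x": {"abc": None}}

    previous = getattr(ip.Completer, "evaluation", None)   # assumed option name
    try:
        ip.Completer.evaluation = "minimal"                # most restrictive setting
        _, matches = ip.Completer.complete(line_buffer="nested['x'][")
        # under a minimal policy the nested lookup is not evaluated,
        # so no dict-key candidates are expected here
    finally:
        if previous is not None:
            ip.Completer.evaluation = previous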
1091 | 1232 | def test_dict_key_completion_bytes(self): |
|
1092 | 1233 | """Test handling of bytes in dict key completion""" |
|
1093 | 1234 | ip = get_ipython() |
|
1094 | 1235 | complete = ip.Completer.complete |
|
1095 | 1236 | |
|
1096 | 1237 | ip.user_ns["d"] = {"abc": None, b"abd": None} |
|
1097 | 1238 | |
|
1098 | 1239 | _, matches = complete(line_buffer="d[") |
|
1099 | 1240 | self.assertIn("'abc'", matches) |
|
1100 | 1241 | self.assertIn("b'abd'", matches) |
|
1101 | 1242 | |
|
1102 | 1243 | if False: # not currently implemented |
|
1103 | 1244 | _, matches = complete(line_buffer="d[b") |
|
1104 | 1245 | self.assertIn("b'abd'", matches) |
|
1105 | 1246 | self.assertNotIn("b'abc'", matches) |
|
1106 | 1247 | |
|
1107 | 1248 | _, matches = complete(line_buffer="d[b'") |
|
1108 | 1249 | self.assertIn("abd", matches) |
|
1109 | 1250 | self.assertNotIn("abc", matches) |
|
1110 | 1251 | |
|
1111 | 1252 | _, matches = complete(line_buffer="d[B'") |
|
1112 | 1253 | self.assertIn("abd", matches) |
|
1113 | 1254 | self.assertNotIn("abc", matches) |
|
1114 | 1255 | |
|
1115 | 1256 | _, matches = complete(line_buffer="d['") |
|
1116 | 1257 | self.assertIn("abc", matches) |
|
1117 | 1258 | self.assertNotIn("abd", matches) |
|
1118 | 1259 | |
|
1119 | 1260 | def test_dict_key_completion_unicode_py3(self): |
|
1120 | 1261 | """Test handling of unicode in dict key completion""" |
|
1121 | 1262 | ip = get_ipython() |
|
1122 | 1263 | complete = ip.Completer.complete |
|
1123 | 1264 | |
|
1124 | 1265 | ip.user_ns["d"] = {"a\u05d0": None} |
|
1125 | 1266 | |
|
1126 | 1267 | # query using escape |
|
1127 | 1268 | if sys.platform != "win32": |
|
1128 | 1269 | # Known failure on Windows |
|
1129 | 1270 | _, matches = complete(line_buffer="d['a\\u05d0") |
|
1130 | 1271 | self.assertIn("u05d0", matches) # tokenized after \\ |
|
1131 | 1272 | |
|
1132 | 1273 | # query using character |
|
1133 | 1274 | _, matches = complete(line_buffer="d['a\u05d0") |
|
1134 | 1275 | self.assertIn("a\u05d0", matches) |
|
1135 | 1276 | |
|
1136 | 1277 | with greedy_completion(): |
|
1137 | 1278 | # query using escape |
|
1138 | 1279 | _, matches = complete(line_buffer="d['a\\u05d0") |
|
1139 | 1280 | self.assertIn("d['a\\u05d0']", matches) # tokenized after \\ |
|
1140 | 1281 | |
|
1141 | 1282 | # query using character |
|
1142 | 1283 | _, matches = complete(line_buffer="d['a\u05d0") |
|
1143 | 1284 | self.assertIn("d['a\u05d0']", matches) |
|
1144 | 1285 | |
|
1145 | 1286 | @dec.skip_without("numpy") |
|
1146 | 1287 | def test_struct_array_key_completion(self): |
|
1147 | 1288 | """Test dict key completion applies to numpy struct arrays""" |
|
1148 | 1289 | import numpy |
|
1149 | 1290 | |
|
1150 | 1291 | ip = get_ipython() |
|
1151 | 1292 | complete = ip.Completer.complete |
|
1152 | 1293 | ip.user_ns["d"] = numpy.array([], dtype=[("hello", "f"), ("world", "f")]) |
|
1153 | 1294 | _, matches = complete(line_buffer="d['") |
|
1154 | 1295 | self.assertIn("hello", matches) |
|
1155 | 1296 | self.assertIn("world", matches) |
|
1156 | 1297 | # complete on the numpy struct itself |
|
1157 | 1298 | dt = numpy.dtype( |
|
1158 | 1299 | [("my_head", [("my_dt", ">u4"), ("my_df", ">u4")]), ("my_data", ">f4", 5)] |
|
1159 | 1300 | ) |
|
1160 | 1301 | x = numpy.zeros(2, dtype=dt) |
|
1161 | 1302 | ip.user_ns["d"] = x[1] |
|
1162 | 1303 | _, matches = complete(line_buffer="d['") |
|
1163 | 1304 | self.assertIn("my_head", matches) |
|
1164 | 1305 | self.assertIn("my_data", matches) |
|
1165 | # complete on a nested level | |
|
1166 | with greedy_completion(): |
|
1306 | ||
|
1307 | def completes_on_nested(): | |
|
1167 | 1308 | ip.user_ns["d"] = numpy.zeros(2, dtype=dt) |
|
1168 | 1309 | _, matches = complete(line_buffer="d[1]['my_head']['") |
|
1169 | 1310 | self.assertTrue(any(["my_dt" in m for m in matches])) |
|
1170 | 1311 | self.assertTrue(any(["my_df" in m for m in matches])) |
|
1312 | # complete on a nested level | |
|
1313 | with greedy_completion(): | |
|
1314 | completes_on_nested() | |
|
1315 | ||
|
1316 | with evaluation_policy("limited"): | |
|
1317 | completes_on_nested() | |
|
1318 | ||
|
1319 | with evaluation_policy("minimal"): | |
|
1320 | with pytest.raises(AssertionError): | |
|
1321 | completes_on_nested() | |
|
1171 | 1322 | |
|
1172 | 1323 | @dec.skip_without("pandas") |
|
1173 | 1324 | def test_dataframe_key_completion(self): |
|
1174 | 1325 | """Test dict key completion applies to pandas DataFrames""" |
|
1175 | 1326 | import pandas |
|
1176 | 1327 | |
|
1177 | 1328 | ip = get_ipython() |
|
1178 | 1329 | complete = ip.Completer.complete |
|
1179 | 1330 | ip.user_ns["d"] = pandas.DataFrame({"hello": [1], "world": [2]}) |
|
1180 | 1331 | _, matches = complete(line_buffer="d['") |
|
1181 | 1332 | self.assertIn("hello", matches) |
|
1182 | 1333 | self.assertIn("world", matches) |
|
1334 | _, matches = complete(line_buffer="d.loc[:, '") | |
|
1335 | self.assertIn("hello", matches) | |
|
1336 | self.assertIn("world", matches) | |
|
1337 | _, matches = complete(line_buffer="d.loc[1:, '") | |
|
1338 | self.assertIn("hello", matches) | |
|
1339 | _, matches = complete(line_buffer="d.loc[1:1, '") | |
|
1340 | self.assertIn("hello", matches) | |
|
1341 | _, matches = complete(line_buffer="d.loc[1:1:-1, '") | |
|
1342 | self.assertIn("hello", matches) | |
|
1343 | _, matches = complete(line_buffer="d.loc[::, '") | |
|
1344 | self.assertIn("hello", matches) | |
|
1183 | 1345 | |
|
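The DataFrame test above, including the newly added `.loc` cases, checks that column labels are offered both for `df[` and for the column slot of `df.loc[rows, '`. A hedged sketch (requires pandas to be installed):

    import pandas

    ip = get_ipython()
    ip.user_ns["df"] = pandas.DataFrame({"hello": [1], "world": [2]})

    _, matches = ip.Completer.complete(line_buffer="df['")
    assert "hello" in matches and "world" in matches

    # column completion also works inside .loc, after the row selector
    _, matches = ip.Completer.complete(line_buffer="df.loc[:, '")
    assert "hello" in matches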
1184 | 1346 | def test_dict_key_completion_invalids(self): |
|
1185 | 1347 | """Smoke test cases dict key completion can't handle""" |
|
1186 | 1348 | ip = get_ipython() |
|
1187 | 1349 | complete = ip.Completer.complete |
|
1188 | 1350 | |
|
1189 | 1351 | ip.user_ns["no_getitem"] = None |
|
1190 | 1352 | ip.user_ns["no_keys"] = [] |
|
1191 | 1353 | ip.user_ns["cant_call_keys"] = dict |
|
1192 | 1354 | ip.user_ns["empty"] = {} |
|
1193 | 1355 | ip.user_ns["d"] = {"abc": 5} |
|
1194 | 1356 | |
|
1195 | 1357 | _, matches = complete(line_buffer="no_getitem['") |
|
1196 | 1358 | _, matches = complete(line_buffer="no_keys['") |
|
1197 | 1359 | _, matches = complete(line_buffer="cant_call_keys['") |
|
1198 | 1360 | _, matches = complete(line_buffer="empty['") |
|
1199 | 1361 | _, matches = complete(line_buffer="name_error['") |
|
1200 | 1362 | _, matches = complete(line_buffer="d['\\") # incomplete escape |
|
1201 | 1363 | |
|
1202 | 1364 | def test_object_key_completion(self): |
|
1203 | 1365 | ip = get_ipython() |
|
1204 | 1366 | ip.user_ns["key_completable"] = KeyCompletable(["qwerty", "qwick"]) |
|
1205 | 1367 | |
|
1206 | 1368 | _, matches = ip.Completer.complete(line_buffer="key_completable['qw") |
|
1207 | 1369 | self.assertIn("qwerty", matches) |
|
1208 | 1370 | self.assertIn("qwick", matches) |
|
1209 | 1371 | |
|
1210 | 1372 | def test_class_key_completion(self): |
|
1211 | 1373 | ip = get_ipython() |
|
1212 | 1374 | NamedInstanceClass("qwerty") |
|
1213 | 1375 | NamedInstanceClass("qwick") |
|
1214 | 1376 | ip.user_ns["named_instance_class"] = NamedInstanceClass |
|
1215 | 1377 | |
|
1216 | 1378 | _, matches = ip.Completer.complete(line_buffer="named_instance_class['qw") |
|
1217 | 1379 | self.assertIn("qwerty", matches) |
|
1218 | 1380 | self.assertIn("qwick", matches) |
|
1219 | 1381 | |
|
1220 | 1382 | def test_tryimport(self): |
|
1221 | 1383 | """ |
|
1222 | 1384 | Test that try-import don't crash on trailing dot, and import modules before |
|
1223 | 1385 | """ |
|
1224 | 1386 | from IPython.core.completerlib import try_import |
|
1225 | 1387 | |
|
1226 | 1388 | assert try_import("IPython.") |
|
1227 | 1389 | |
|
1228 | 1390 | def test_aimport_module_completer(self): |
|
1229 | 1391 | ip = get_ipython() |
|
1230 | 1392 | _, matches = ip.complete("i", "%aimport i") |
|
1231 | 1393 | self.assertIn("io", matches) |
|
1232 | 1394 | self.assertNotIn("int", matches) |
|
1233 | 1395 | |
|
1234 | 1396 | def test_nested_import_module_completer(self): |
|
1235 | 1397 | ip = get_ipython() |
|
1236 | 1398 | _, matches = ip.complete(None, "import IPython.co", 17) |
|
1237 | 1399 | self.assertIn("IPython.core", matches) |
|
1238 | 1400 | self.assertNotIn("import IPython.core", matches) |
|
1239 | 1401 | self.assertNotIn("IPython.display", matches) |
|
1240 | 1402 | |
|
1241 | 1403 | def test_import_module_completer(self): |
|
1242 | 1404 | ip = get_ipython() |
|
1243 | 1405 | _, matches = ip.complete("i", "import i") |
|
1244 | 1406 | self.assertIn("io", matches) |
|
1245 | 1407 | self.assertNotIn("int", matches) |
|
1246 | 1408 | |
|
1247 | 1409 | def test_from_module_completer(self): |
|
1248 | 1410 | ip = get_ipython() |
|
1249 | 1411 | _, matches = ip.complete("B", "from io import B", 16) |
|
1250 | 1412 | self.assertIn("BytesIO", matches) |
|
1251 | 1413 | self.assertNotIn("BaseException", matches) |
|
1252 | 1414 | |
|
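The module-completer tests above cover `%aimport`, `import x` and `from m import x`; in each case only importable names are proposed, not builtins. A short sketch:

    ip = get_ipython()

    _, matches = ip.complete("i", "import i")
    assert "io" in matches          # module names only, so no `int` here

    _, matches = ip.complete("B", "from io import B", 16)
    assert "BytesIO" in matches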
1253 | 1415 | def test_snake_case_completion(self): |
|
1254 | 1416 | ip = get_ipython() |
|
1255 | 1417 | ip.Completer.use_jedi = False |
|
1256 | 1418 | ip.user_ns["some_three"] = 3 |
|
1257 | 1419 | ip.user_ns["some_four"] = 4 |
|
1258 | 1420 | _, matches = ip.complete("s_", "print(s_f") |
|
1259 | 1421 | self.assertIn("some_three", matches) |
|
1260 | 1422 | self.assertIn("some_four", matches) |
|
1261 | 1423 | |
|
1262 | 1424 | def test_mix_terms(self): |
|
1263 | 1425 | ip = get_ipython() |
|
1264 | 1426 | from textwrap import dedent |
|
1265 | 1427 | |
|
1266 | 1428 | ip.Completer.use_jedi = False |
|
1267 | 1429 | ip.ex( |
|
1268 | 1430 | dedent( |
|
1269 | 1431 | """ |
|
1270 | 1432 | class Test: |
|
1271 | 1433 | def meth(self, meth_arg1): |
|
1272 | 1434 | print("meth") |
|
1273 | 1435 | |
|
1274 | 1436 | def meth_1(self, meth1_arg1, meth1_arg2): |
|
1275 | 1437 | print("meth1") |
|
1276 | 1438 | |
|
1277 | 1439 | def meth_2(self, meth2_arg1, meth2_arg2): |
|
1278 | 1440 | print("meth2") |
|
1279 | 1441 | test = Test() |
|
1280 | 1442 | """ |
|
1281 | 1443 | ) |
|
1282 | 1444 | ) |
|
1283 | 1445 | _, matches = ip.complete(None, "test.meth(") |
|
1284 | 1446 | self.assertIn("meth_arg1=", matches) |
|
1285 | 1447 | self.assertNotIn("meth2_arg1=", matches) |
|
1286 | 1448 | |
|
1287 | 1449 | def test_percent_symbol_restrict_to_magic_completions(self): |
|
1288 | 1450 | ip = get_ipython() |
|
1289 | 1451 | completer = ip.Completer |
|
1290 | 1452 | text = "%a" |
|
1291 | 1453 | |
|
1292 | 1454 | with provisionalcompleter(): |
|
1293 | 1455 | completer.use_jedi = True |
|
1294 | 1456 | completions = completer.completions(text, len(text)) |
|
1295 | 1457 | for c in completions: |
|
1296 | 1458 | self.assertEqual(c.text[0], "%") |
|
1297 | 1459 | |
|
1298 | 1460 | def test_fwd_unicode_restricts(self): |
|
1299 | 1461 | ip = get_ipython() |
|
1300 | 1462 | completer = ip.Completer |
|
1301 | 1463 | text = "\\ROMAN NUMERAL FIVE" |
|
1302 | 1464 | |
|
1303 | 1465 | with provisionalcompleter(): |
|
1304 | 1466 | completer.use_jedi = True |
|
1305 | 1467 | completions = [ |
|
1306 | 1468 | completion.text for completion in completer.completions(text, len(text)) |
|
1307 | 1469 | ] |
|
1308 | 1470 | self.assertEqual(completions, ["\u2164"]) |
|
1309 | 1471 | |
|
1310 | 1472 | def test_dict_key_restrict_to_dicts(self): |
|
1311 | 1473 | """Test that dict key suppresses non-dict completion items""" |
|
1312 | 1474 | ip = get_ipython() |
|
1313 | 1475 | c = ip.Completer |
|
1314 | 1476 | d = {"abc": None} |
|
1315 | 1477 | ip.user_ns["d"] = d |
|
1316 | 1478 | |
|
1317 | 1479 | text = 'd["a' |
|
1318 | 1480 | |
|
1319 | 1481 | def _(): |
|
1320 | 1482 | with provisionalcompleter(): |
|
1321 | 1483 | c.use_jedi = True |
|
1322 | 1484 | return [ |
|
1323 | 1485 | completion.text for completion in c.completions(text, len(text)) |
|
1324 | 1486 | ] |
|
1325 | 1487 | |
|
1326 | 1488 | completions = _() |
|
1327 | 1489 | self.assertEqual(completions, ["abc"]) |
|
1328 | 1490 | |
|
1329 | 1491 | # check that it can be disabled in granular manner: |
|
1330 | 1492 | cfg = Config() |
|
1331 | 1493 | cfg.IPCompleter.suppress_competing_matchers = { |
|
1332 | 1494 | "IPCompleter.dict_key_matcher": False |
|
1333 | 1495 | } |
|
1334 | 1496 | c.update_config(cfg) |
|
1335 | 1497 | |
|
1336 | 1498 | completions = _() |
|
1337 | 1499 | self.assertIn("abc", completions) |
|
1338 | 1500 | self.assertGreater(len(completions), 1) |
|
1339 | 1501 | |
|
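The test above shows that the dict-key matcher suppresses other matchers inside `d["...`, and that this can be relaxed per matcher through `IPCompleter.suppress_competing_matchers`. A configuration sketch using the same option the test configures:

    from traitlets.config import Config

    ip = get_ipython()
    cfg = Config()
    cfg.IPCompleter.suppress_competing_matchers = {
        # let other matchers compete again when a dict key is being completed
        "IPCompleter.dict_key_matcher": False,
    }
    ip.Completer.update_config(cfg)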
1340 | 1502 | def test_matcher_suppression(self): |
|
1341 | 1503 | @completion_matcher(identifier="a_matcher") |
|
1342 | 1504 | def a_matcher(text): |
|
1343 | 1505 | return ["completion_a"] |
|
1344 | 1506 | |
|
1345 | 1507 | @completion_matcher(identifier="b_matcher", api_version=2) |
|
1346 | 1508 | def b_matcher(context: CompletionContext): |
|
1347 | 1509 | text = context.token |
|
1348 | 1510 | result = {"completions": [SimpleCompletion("completion_b")]} |
|
1349 | 1511 | |
|
1350 | 1512 | if text == "suppress c": |
|
1351 | 1513 | result["suppress"] = {"c_matcher"} |
|
1352 | 1514 | |
|
1353 | 1515 | if text.startswith("suppress all"): |
|
1354 | 1516 | result["suppress"] = True |
|
1355 | 1517 | if text == "suppress all but c": |
|
1356 | 1518 | result["do_not_suppress"] = {"c_matcher"} |
|
1357 | 1519 | if text == "suppress all but a": |
|
1358 | 1520 | result["do_not_suppress"] = {"a_matcher"} |
|
1359 | 1521 | |
|
1360 | 1522 | return result |
|
1361 | 1523 | |
|
1362 | 1524 | @completion_matcher(identifier="c_matcher") |
|
1363 | 1525 | def c_matcher(text): |
|
1364 | 1526 | return ["completion_c"] |
|
1365 | 1527 | |
|
1366 | 1528 | with custom_matchers([a_matcher, b_matcher, c_matcher]): |
|
1367 | 1529 | ip = get_ipython() |
|
1368 | 1530 | c = ip.Completer |
|
1369 | 1531 | |
|
1370 | 1532 | def _(text, expected): |
|
1371 | 1533 | c.use_jedi = False |
|
1372 | 1534 | s, matches = c.complete(text) |
|
1373 | 1535 | self.assertEqual(expected, matches) |
|
1374 | 1536 | |
|
1375 | 1537 | _("do not suppress", ["completion_a", "completion_b", "completion_c"]) |
|
1376 | 1538 | _("suppress all", ["completion_b"]) |
|
1377 | 1539 | _("suppress all but a", ["completion_a", "completion_b"]) |
|
1378 | 1540 | _("suppress all but c", ["completion_b", "completion_c"]) |
|
1379 | 1541 | |
|
1380 | 1542 | def configure(suppression_config): |
|
1381 | 1543 | cfg = Config() |
|
1382 | 1544 | cfg.IPCompleter.suppress_competing_matchers = suppression_config |
|
1383 | 1545 | c.update_config(cfg) |
|
1384 | 1546 | |
|
1385 | 1547 | # test that configuration takes priority over the run-time decisions |
|
1386 | 1548 | |
|
1387 | 1549 | configure(False) |
|
1388 | 1550 | _("suppress all", ["completion_a", "completion_b", "completion_c"]) |
|
1389 | 1551 | |
|
1390 | 1552 | configure({"b_matcher": False}) |
|
1391 | 1553 | _("suppress all", ["completion_a", "completion_b", "completion_c"]) |
|
1392 | 1554 | |
|
1393 | 1555 | configure({"a_matcher": False}) |
|
1394 | 1556 | _("suppress all", ["completion_b"]) |
|
1395 | 1557 | |
|
1396 | 1558 | configure({"b_matcher": True}) |
|
1397 | 1559 | _("do not suppress", ["completion_b"]) |
|
1398 | 1560 | |
|
1399 | 1561 | configure(True) |
|
1400 | 1562 | _("do not suppress", ["completion_a"]) |
|
1401 | 1563 | |
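The suppression behaviour exercised above is driven by the IPCompleter.suppress_competing_matchers traitlet. As a minimal sketch of how the same setting is applied outside the test harness, via a profile's ipython_config.py (the config-file route is an assumption about typical usage; the tests set it through Config/update_config instead):

# ipython_config.py -- illustrative sketch only; the values mirror the tests above.
c = get_config()

# Never let any matcher suppress its competitors, regardless of what the
# matcher itself requests at run time:
c.IPCompleter.suppress_competing_matchers = False

# Or control it per matcher identifier, e.g. keep "a_matcher" from suppressing
# the others while leaving the remaining matchers at their defaults:
# c.IPCompleter.suppress_competing_matchers = {"a_matcher": False}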
|
1402 | 1564 | def test_matcher_suppression_with_iterator(self): |
|
1403 | 1565 | @completion_matcher(identifier="matcher_returning_iterator") |
|
1404 | 1566 | def matcher_returning_iterator(text): |
|
1405 | 1567 | return iter(["completion_iter"]) |
|
1406 | 1568 | |
|
1407 | 1569 | @completion_matcher(identifier="matcher_returning_list") |
|
1408 | 1570 | def matcher_returning_list(text): |
|
1409 | 1571 | return ["completion_list"] |
|
1410 | 1572 | |
|
1411 | 1573 | with custom_matchers([matcher_returning_iterator, matcher_returning_list]): |
|
1412 | 1574 | ip = get_ipython() |
|
1413 | 1575 | c = ip.Completer |
|
1414 | 1576 | |
|
1415 | 1577 | def _(text, expected): |
|
1416 | 1578 | c.use_jedi = False |
|
1417 | 1579 | s, matches = c.complete(text) |
|
1418 | 1580 | self.assertEqual(expected, matches) |
|
1419 | 1581 | |
|
1420 | 1582 | def configure(suppression_config): |
|
1421 | 1583 | cfg = Config() |
|
1422 | 1584 | cfg.IPCompleter.suppress_competing_matchers = suppression_config |
|
1423 | 1585 | c.update_config(cfg) |
|
1424 | 1586 | |
|
1425 | 1587 | configure(False) |
|
1426 | 1588 | _("---", ["completion_iter", "completion_list"]) |
|
1427 | 1589 | |
|
1428 | 1590 | configure(True) |
|
1429 | 1591 | _("---", ["completion_iter"]) |
|
1430 | 1592 | |
|
1431 | 1593 | configure(None) |
|
1432 | 1594 | _("--", ["completion_iter", "completion_list"]) |
|
1433 | 1595 | |
|
1434 | 1596 | def test_matcher_suppression_with_jedi(self): |
|
1435 | 1597 | ip = get_ipython() |
|
1436 | 1598 | c = ip.Completer |
|
1437 | 1599 | c.use_jedi = True |
|
1438 | 1600 | |
|
1439 | 1601 | def configure(suppression_config): |
|
1440 | 1602 | cfg = Config() |
|
1441 | 1603 | cfg.IPCompleter.suppress_competing_matchers = suppression_config |
|
1442 | 1604 | c.update_config(cfg) |
|
1443 | 1605 | |
|
1444 | 1606 | def _(): |
|
1445 | 1607 | with provisionalcompleter(): |
|
1446 | 1608 | matches = [completion.text for completion in c.completions("dict.", 5)] |
|
1447 | 1609 | self.assertIn("keys", matches) |
|
1448 | 1610 | |
|
1449 | 1611 | configure(False) |
|
1450 | 1612 | _() |
|
1451 | 1613 | |
|
1452 | 1614 | configure(True) |
|
1453 | 1615 | _() |
|
1454 | 1616 | |
|
1455 | 1617 | configure(None) |
|
1456 | 1618 | _() |
|
1457 | 1619 | |
|
1458 | 1620 | def test_matcher_disabling(self): |
|
1459 | 1621 | @completion_matcher(identifier="a_matcher") |
|
1460 | 1622 | def a_matcher(text): |
|
1461 | 1623 | return ["completion_a"] |
|
1462 | 1624 | |
|
1463 | 1625 | @completion_matcher(identifier="b_matcher") |
|
1464 | 1626 | def b_matcher(text): |
|
1465 | 1627 | return ["completion_b"] |
|
1466 | 1628 | |
|
1467 | 1629 | def _(expected): |
|
1468 | 1630 | s, matches = c.complete("completion_") |
|
1469 | 1631 | self.assertEqual(expected, matches) |
|
1470 | 1632 | |
|
1471 | 1633 | with custom_matchers([a_matcher, b_matcher]): |
|
1472 | 1634 | ip = get_ipython() |
|
1473 | 1635 | c = ip.Completer |
|
1474 | 1636 | |
|
1475 | 1637 | _(["completion_a", "completion_b"]) |
|
1476 | 1638 | |
|
1477 | 1639 | cfg = Config() |
|
1478 | 1640 | cfg.IPCompleter.disable_matchers = ["b_matcher"] |
|
1479 | 1641 | c.update_config(cfg) |
|
1480 | 1642 | |
|
1481 | 1643 | _(["completion_a"]) |
|
1482 | 1644 | |
|
1483 | 1645 | cfg.IPCompleter.disable_matchers = [] |
|
1484 | 1646 | c.update_config(cfg) |
|
1485 | 1647 | |
|
1486 | 1648 | def test_matcher_priority(self): |
|
1487 | 1649 | @completion_matcher(identifier="a_matcher", priority=0, api_version=2) |
|
1488 | 1650 | def a_matcher(text): |
|
1489 | 1651 | return {"completions": [SimpleCompletion("completion_a")], "suppress": True} |
|
1490 | 1652 | |
|
1491 | 1653 | @completion_matcher(identifier="b_matcher", priority=2, api_version=2) |
|
1492 | 1654 | def b_matcher(text): |
|
1493 | 1655 | return {"completions": [SimpleCompletion("completion_b")], "suppress": True} |
|
1494 | 1656 | |
|
1495 | 1657 | def _(expected): |
|
1496 | 1658 | s, matches = c.complete("completion_") |
|
1497 | 1659 | self.assertEqual(expected, matches) |
|
1498 | 1660 | |
|
1499 | 1661 | with custom_matchers([a_matcher, b_matcher]): |
|
1500 | 1662 | ip = get_ipython() |
|
1501 | 1663 | c = ip.Completer |
|
1502 | 1664 | |
|
1503 | 1665 | _(["completion_b"]) |
|
1504 | 1666 | a_matcher.matcher_priority = 3 |
|
1505 | 1667 | _(["completion_a"]) |
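For reference, a self-contained sketch of a version-2 matcher in the same style as the ones defined throughout these tests. The import path and the custom_matchers.append(...) registration are assumptions inferred from the test module; the tests register matchers through their own custom_matchers(...) context manager instead:

# Illustrative sketch; the import path is assumed from the test module's usage.
from IPython.core.completer import SimpleCompletion, completion_matcher


@completion_matcher(identifier="color_matcher", priority=1, api_version=2)
def color_matcher(context):
    # context.token is the fragment under the cursor (cf. CompletionContext above)
    colors = ["red", "green", "blue"]
    return {
        "completions": [
            SimpleCompletion(c) for c in colors if c.startswith(context.token)
        ],
        # Returning "suppress": True here would hide other matchers' results,
        # subject to the suppress_competing_matchers configuration shown earlier.
    }


# Assumed registration for a live session (not how the tests do it):
# get_ipython().Completer.custom_matchers.append(color_matcher)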
|
1668 | ||
|
1669 | ||
|
1670 | @pytest.mark.parametrize( | |
|
1671 | "input, expected", | |
|
1672 | [ | |
|
1673 | ["1.234", "1.234"], | |
|
1674 | # should match signed numbers | |
|
1675 | ["+1", "+1"], | |
|
1676 | ["-1", "-1"], | |
|
1677 | ["-1.0", "-1.0"], | |
|
1678 | ["-1.", "-1."], | |
|
1679 | ["+1.", "+1."], | |
|
1680 | [".1", ".1"], | |
|
1681 | # should not match non-numbers | |
|
1682 | ["1..", None], | |
|
1683 | ["..", None], | |
|
1684 | [".1.", None], | |
|
1685 | # should match after comma | |
|
1686 | [",1", "1"], | |
|
1687 | [", 1", "1"], | |
|
1688 | [", .1", ".1"], | |
|
1689 | [", +.1", "+.1"], | |
|
1690 | # should not match after trailing spaces | |
|
1691 | [".1 ", None], | |
|
1692 | # some complex cases | |
|
1693 | ["0b_0011_1111_0100_1110", "0b_0011_1111_0100_1110"], | |
|
1694 | ["0xdeadbeef", "0xdeadbeef"], | |
|
1695 | ["0b_1110_0101", "0b_1110_0101"], | |
|
1696 | # should not match if in an operation | |
|
1697 | ["1 + 1", None], | |
|
1698 | [", 1 + 1", None], | |
|
1699 | ], | |
|
1700 | ) | |
|
1701 | def test_match_numeric_literal_for_dict_key(input, expected): | |
|
1702 | assert _match_number_in_dict_key_prefix(input) == expected |
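A few rows of the table above, spelled out as plain assertions; the import location is an assumption based on where the test module takes the helper from:

# Illustrative only -- these mirror rows of the parametrized table above.
from IPython.core.completer import _match_number_in_dict_key_prefix

assert _match_number_in_dict_key_prefix("-1.0") == "-1.0"  # signed floats match
assert _match_number_in_dict_key_prefix(", .1") == ".1"    # numbers after a comma match
assert _match_number_in_dict_key_prefix("1 + 1") is None   # arithmetic does not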
@@ -1,597 +1,598 b'' | |||
|
1 | 1 | """Tests for autoreload extension. |
|
2 | 2 | """ |
|
3 | 3 | # ----------------------------------------------------------------------------- |
|
4 | 4 | # Copyright (c) 2012 IPython Development Team. |
|
5 | 5 | # |
|
6 | 6 | # Distributed under the terms of the Modified BSD License. |
|
7 | 7 | # |
|
8 | 8 | # The full license is in the file COPYING.txt, distributed with this software. |
|
9 | 9 | # ----------------------------------------------------------------------------- |
|
10 | 10 | |
|
11 | 11 | # ----------------------------------------------------------------------------- |
|
12 | 12 | # Imports |
|
13 | 13 | # ----------------------------------------------------------------------------- |
|
14 | 14 | |
|
15 | 15 | import os |
|
16 | 16 | import platform |
|
17 | 17 | import pytest |
|
18 | 18 | import sys |
|
19 | 19 | import tempfile |
|
20 | 20 | import textwrap |
|
21 | 21 | import shutil |
|
22 | 22 | import random |
|
23 | 23 | import time |
|
24 | 24 | from io import StringIO |
|
25 | 25 | |
|
26 | 26 | import IPython.testing.tools as tt |
|
27 | 27 | |
|
28 | 28 | from unittest import TestCase |
|
29 | 29 | |
|
30 | 30 | from IPython.extensions.autoreload import AutoreloadMagics |
|
31 | 31 | from IPython.core.events import EventManager, pre_run_cell |
|
32 | 32 | from IPython.testing.decorators import skipif_not_numpy |
|
33 | 33 | |
|
34 | 34 | if platform.python_implementation() == "PyPy": |
|
35 | 35 | pytest.skip( |
|
36 | 36 | "Current autoreload implementation is extremely slow on PyPy", |
|
37 | 37 | allow_module_level=True, |
|
38 | 38 | ) |
|
39 | 39 | |
|
40 | 40 | # ----------------------------------------------------------------------------- |
|
41 | 41 | # Test fixture |
|
42 | 42 | # ----------------------------------------------------------------------------- |
|
43 | 43 | |
|
44 | 44 | noop = lambda *a, **kw: None |
|
45 | 45 | |
|
46 | 46 | |
|
47 | 47 | class FakeShell: |
|
48 | 48 | def __init__(self): |
|
49 | 49 | self.ns = {} |
|
50 | 50 | self.user_ns = self.ns |
|
51 | 51 | self.user_ns_hidden = {} |
|
52 | 52 | self.events = EventManager(self, {"pre_run_cell", pre_run_cell}) |
|
53 | 53 | self.auto_magics = AutoreloadMagics(shell=self) |
|
54 | 54 | self.events.register("pre_run_cell", self.auto_magics.pre_run_cell) |
|
55 | 55 | |
|
56 | 56 | register_magics = set_hook = noop |
|
57 | 57 | |
|
58 | 58 | def run_code(self, code): |
|
59 | 59 | self.events.trigger("pre_run_cell") |
|
60 | 60 | exec(code, self.user_ns) |
|
61 | 61 | self.auto_magics.post_execute_hook() |
|
62 | 62 | |
|
63 | 63 | def push(self, items): |
|
64 | 64 | self.ns.update(items) |
|
65 | 65 | |
|
66 | 66 | def magic_autoreload(self, parameter): |
|
67 | 67 | self.auto_magics.autoreload(parameter) |
|
68 | 68 | |
|
69 | 69 | def magic_aimport(self, parameter, stream=None): |
|
70 | 70 | self.auto_magics.aimport(parameter, stream=stream) |
|
71 | 71 | self.auto_magics.post_execute_hook() |
|
72 | 72 | |
|
73 | 73 | |
|
74 | 74 | class Fixture(TestCase): |
|
75 | 75 | """Fixture for creating test module files""" |
|
76 | 76 | |
|
77 | 77 | test_dir = None |
|
78 | 78 | old_sys_path = None |
|
79 | 79 | filename_chars = "abcdefghijklmopqrstuvwxyz0123456789" |
|
80 | 80 | |
|
81 | 81 | def setUp(self): |
|
82 | 82 | self.test_dir = tempfile.mkdtemp() |
|
83 | 83 | self.old_sys_path = list(sys.path) |
|
84 | 84 | sys.path.insert(0, self.test_dir) |
|
85 | 85 | self.shell = FakeShell() |
|
86 | 86 | |
|
87 | 87 | def tearDown(self): |
|
88 | 88 | shutil.rmtree(self.test_dir) |
|
89 | 89 | sys.path = self.old_sys_path |
|
90 | 90 | |
|
91 | 91 | self.test_dir = None |
|
92 | 92 | self.old_sys_path = None |
|
93 | 93 | self.shell = None |
|
94 | 94 | |
|
95 | 95 | def get_module(self): |
|
96 | 96 | module_name = "tmpmod_" + "".join(random.sample(self.filename_chars, 20)) |
|
97 | 97 | if module_name in sys.modules: |
|
98 | 98 | del sys.modules[module_name] |
|
99 | 99 | file_name = os.path.join(self.test_dir, module_name + ".py") |
|
100 | 100 | return module_name, file_name |
|
101 | 101 | |
|
102 | 102 | def write_file(self, filename, content): |
|
103 | 103 | """ |
|
104 | 104 | Write a file, and force a timestamp difference of at least one second |
|
105 | 105 | |
|
106 | 106 | Notes |
|
107 | 107 | ----- |
|
108 | 108 | Python's .pyc files record the timestamp of their compilation |
|
109 | 109 | with a time resolution of one second. |
|
110 | 110 | |
|
111 | 111 | Therefore, we need to force a timestamp difference between .py |
|
112 | 112 | and .pyc, without having the .py file be timestamped in the |
|
113 | 113 | future, and without changing the timestamp of the .pyc file |
|
114 | 114 | (because that is stored in the file). The only reliable way |
|
115 | 115 | to achieve this seems to be to sleep. |
|
116 | 116 | """ |
|
117 | 117 | content = textwrap.dedent(content) |
|
118 | 118 | # Sleep one second + eps |
|
119 | 119 | time.sleep(1.05) |
|
120 | 120 | |
|
121 | 121 | # Write |
|
122 | 122 | with open(filename, "w", encoding="utf-8") as f: |
|
123 | 123 | f.write(content) |
|
124 | 124 | |
|
125 | 125 | def new_module(self, code): |
|
126 | 126 | code = textwrap.dedent(code) |
|
127 | 127 | mod_name, mod_fn = self.get_module() |
|
128 | 128 | with open(mod_fn, "w", encoding="utf-8") as f: |
|
129 | 129 | f.write(code) |
|
130 | 130 | return mod_name, mod_fn |
|
131 | 131 | |
|
132 | 132 | |
|
133 | 133 | # ----------------------------------------------------------------------------- |
|
134 | 134 | # Test automatic reloading |
|
135 | 135 | # ----------------------------------------------------------------------------- |
|
136 | 136 | |
|
137 | 137 | |
|
138 | 138 | def pickle_get_current_class(obj): |
|
139 | 139 | """ |
|
140 | 140 | Original issue comes from pickle; hence the name. |
|
141 | 141 | """ |
|
142 | 142 | name = obj.__class__.__name__ |
|
143 | 143 | module_name = getattr(obj, "__module__", None) |
|
144 | 144 | obj2 = sys.modules[module_name] |
|
145 | 145 | for subpath in name.split("."): |
|
146 | 146 | obj2 = getattr(obj2, subpath) |
|
147 | 147 | return obj2 |
|
148 | 148 | |
|
149 | 149 | |
|
150 | 150 | class TestAutoreload(Fixture): |
|
151 | 151 | def test_reload_enums(self): |
|
152 | 152 | mod_name, mod_fn = self.new_module( |
|
153 | 153 | textwrap.dedent( |
|
154 | 154 | """ |
|
155 | 155 | from enum import Enum |
|
156 | 156 | class MyEnum(Enum): |
|
157 | 157 | A = 'A' |
|
158 | 158 | B = 'B' |
|
159 | 159 | """ |
|
160 | 160 | ) |
|
161 | 161 | ) |
|
162 | 162 | self.shell.magic_autoreload("2") |
|
163 | 163 | self.shell.magic_aimport(mod_name) |
|
164 | 164 | self.write_file( |
|
165 | 165 | mod_fn, |
|
166 | 166 | textwrap.dedent( |
|
167 | 167 | """ |
|
168 | 168 | from enum import Enum |
|
169 | 169 | class MyEnum(Enum): |
|
170 | 170 | A = 'A' |
|
171 | 171 | B = 'B' |
|
172 | 172 | C = 'C' |
|
173 | 173 | """ |
|
174 | 174 | ), |
|
175 | 175 | ) |
|
176 | 176 | with tt.AssertNotPrints( |
|
177 | 177 | ("[autoreload of %s failed:" % mod_name), channel="stderr" |
|
178 | 178 | ): |
|
179 | 179 | self.shell.run_code("pass") # trigger another reload |
|
180 | 180 | |
|
181 | 181 | def test_reload_class_type(self): |
|
182 | 182 | self.shell.magic_autoreload("2") |
|
183 | 183 | mod_name, mod_fn = self.new_module( |
|
184 | 184 | """ |
|
185 | 185 | class Test(): |
|
186 | 186 | def meth(self): |
|
187 | 187 | return "old" |
|
188 | 188 | """ |
|
189 | 189 | ) |
|
190 | 190 | assert "test" not in self.shell.ns |
|
191 | 191 | assert "result" not in self.shell.ns |
|
192 | 192 | |
|
193 | 193 | self.shell.run_code("from %s import Test" % mod_name) |
|
194 | 194 | self.shell.run_code("test = Test()") |
|
195 | 195 | |
|
196 | 196 | self.write_file( |
|
197 | 197 | mod_fn, |
|
198 | 198 | """ |
|
199 | 199 | class Test(): |
|
200 | 200 | def meth(self): |
|
201 | 201 | return "new" |
|
202 | 202 | """, |
|
203 | 203 | ) |
|
204 | 204 | |
|
205 | 205 | test_object = self.shell.ns["test"] |
|
206 | 206 | |
|
207 | 207 | # important to trigger autoreload logic ! |
|
208 | 208 | self.shell.run_code("pass") |
|
209 | 209 | |
|
210 | 210 | test_class = pickle_get_current_class(test_object) |
|
211 | 211 | assert isinstance(test_object, test_class) |
|
212 | 212 | |
|
213 | 213 | # extra check. |
|
214 | 214 | self.shell.run_code("import pickle") |
|
215 | 215 | self.shell.run_code("p = pickle.dumps(test)") |
|
216 | 216 | |
|
217 | 217 | def test_reload_class_attributes(self): |
|
218 | 218 | self.shell.magic_autoreload("2") |
|
219 | 219 | mod_name, mod_fn = self.new_module( |
|
220 | 220 | textwrap.dedent( |
|
221 | 221 | """ |
|
222 | 222 | class MyClass: |
|
223 | 223 | |
|
224 | 224 | def __init__(self, a=10): |
|
225 | 225 | self.a = a |
|
226 | 226 | self.b = 22 |
|
227 | 227 | # self.toto = 33 |
|
228 | 228 | |
|
229 | 229 | def square(self): |
|
230 | 230 | print('compute square') |
|
231 | 231 | return self.a*self.a |
|
232 | 232 | """ |
|
233 | 233 | ) |
|
234 | 234 | ) |
|
235 | 235 | self.shell.run_code("from %s import MyClass" % mod_name) |
|
236 | 236 | self.shell.run_code("first = MyClass(5)") |
|
237 | 237 | self.shell.run_code("first.square()") |
|
238 | 238 | with self.assertRaises(AttributeError): |
|
239 | 239 | self.shell.run_code("first.cube()") |
|
240 | 240 | with self.assertRaises(AttributeError): |
|
241 | 241 | self.shell.run_code("first.power(5)") |
|
242 | 242 | self.shell.run_code("first.b") |
|
243 | 243 | with self.assertRaises(AttributeError): |
|
244 | 244 | self.shell.run_code("first.toto") |
|
245 | 245 | |
|
246 | 246 | # remove square, add power |
|
247 | 247 | |
|
248 | 248 | self.write_file( |
|
249 | 249 | mod_fn, |
|
250 | 250 | textwrap.dedent( |
|
251 | 251 | """ |
|
252 | 252 | class MyClass: |
|
253 | 253 | |
|
254 | 254 | def __init__(self, a=10): |
|
255 | 255 | self.a = a |
|
256 | 256 | self.b = 11 |
|
257 | 257 | |
|
258 | 258 | def power(self, p): |
|
259 | 259 | print('compute power '+str(p)) |
|
260 | 260 | return self.a**p |
|
261 | 261 | """ |
|
262 | 262 | ), |
|
263 | 263 | ) |
|
264 | 264 | |
|
265 | 265 | self.shell.run_code("second = MyClass(5)") |
|
266 | 266 | |
|
267 | 267 | for object_name in {"first", "second"}: |
|
268 | 268 | self.shell.run_code(f"{object_name}.power(5)") |
|
269 | 269 | with self.assertRaises(AttributeError): |
|
270 | 270 | self.shell.run_code(f"{object_name}.cube()") |
|
271 | 271 | with self.assertRaises(AttributeError): |
|
272 | 272 | self.shell.run_code(f"{object_name}.square()") |
|
273 | 273 | self.shell.run_code(f"{object_name}.b") |
|
274 | 274 | self.shell.run_code(f"{object_name}.a") |
|
275 | 275 | with self.assertRaises(AttributeError): |
|
276 | 276 | self.shell.run_code(f"{object_name}.toto") |
|
277 | 277 | |
|
278 | 278 | @skipif_not_numpy |
|
279 | 279 | def test_comparing_numpy_structures(self): |
|
280 | 280 | self.shell.magic_autoreload("2") |
|
281 | 281 | mod_name, mod_fn = self.new_module( |
|
282 | 282 | textwrap.dedent( |
|
283 | 283 | """ |
|
284 | 284 | import numpy as np |
|
285 | 285 | class MyClass: |
|
286 | 286 | a = (np.array((.1, .2)), |
|
287 | 287 | np.array((.2, .3))) |
|
288 | 288 | """ |
|
289 | 289 | ) |
|
290 | 290 | ) |
|
291 | 291 | self.shell.run_code("from %s import MyClass" % mod_name) |
|
292 | 292 | self.shell.run_code("first = MyClass()") |
|
293 | 293 | |
|
294 | 294 | # change property `a` |
|
295 | 295 | self.write_file( |
|
296 | 296 | mod_fn, |
|
297 | 297 | textwrap.dedent( |
|
298 | 298 | """ |
|
299 | 299 | import numpy as np |
|
300 | 300 | class MyClass: |
|
301 | 301 | a = (np.array((.3, .4)), |
|
302 | 302 | np.array((.5, .6))) |
|
303 | 303 | """ |
|
304 | 304 | ), |
|
305 | 305 | ) |
|
306 | 306 | |
|
307 | 307 | with tt.AssertNotPrints( |
|
308 | 308 | ("[autoreload of %s failed:" % mod_name), channel="stderr" |
|
309 | 309 | ): |
|
310 | 310 | self.shell.run_code("pass") # trigger another reload |
|
311 | 311 | |
|
312 | 312 | def test_autoload_newly_added_objects(self): |
|
313 | 313 | self.shell.magic_autoreload("3") |
|
314 | 314 | mod_code = """ |
|
315 | 315 | def func1(): pass |
|
316 | 316 | """ |
|
317 | 317 | mod_name, mod_fn = self.new_module(textwrap.dedent(mod_code)) |
|
318 | 318 | self.shell.run_code(f"from {mod_name} import *") |
|
319 | 319 | self.shell.run_code("func1()") |
|
320 | 320 | with self.assertRaises(NameError): |
|
321 | 321 | self.shell.run_code("func2()") |
|
322 | 322 | with self.assertRaises(NameError): |
|
323 | 323 | self.shell.run_code("t = Test()") |
|
324 | 324 | with self.assertRaises(NameError): |
|
325 | 325 | self.shell.run_code("number") |
|
326 | 326 | |
|
327 | 327 | # ----------- TEST NEW OBJ LOADED -------------------------- |
|
328 | 328 | |
|
329 | 329 | new_code = """ |
|
330 | 330 | def func1(): pass |
|
331 | 331 | def func2(): pass |
|
332 | 332 | class Test: pass |
|
333 | 333 | number = 0 |
|
334 | 334 | from enum import Enum |
|
335 | 335 | class TestEnum(Enum): |
|
336 | 336 | A = 'a' |
|
337 | 337 | """ |
|
338 | 338 | self.write_file(mod_fn, textwrap.dedent(new_code)) |
|
339 | 339 | |
|
340 | 340 | # test function now exists in shell's namespace namespace |
|
341 | 341 | self.shell.run_code("func2()") |
|
342 | 342 | # test function now exists in module's dict |
|
343 | 343 | self.shell.run_code(f"import sys; sys.modules['{mod_name}'].func2()") |
|
344 | 344 | # test class now exists |
|
345 | 345 | self.shell.run_code("t = Test()") |
|
346 | 346 | # test global built-in var now exists |
|
347 | 347 | self.shell.run_code("number") |
|
348 | 348 | # test the enumerations gets loaded successfully |
|
349 | 349 | self.shell.run_code("TestEnum.A") |
|
350 | 350 | |
|
351 | 351 | # ----------- TEST NEW OBJ CAN BE CHANGED -------------------- |
|
352 | 352 | |
|
353 | 353 | new_code = """ |
|
354 | 354 | def func1(): return 'changed' |
|
355 | 355 | def func2(): return 'changed' |
|
356 | 356 | class Test: |
|
357 | 357 | def new_func(self): |
|
358 | 358 | return 'changed' |
|
359 | 359 | number = 1 |
|
360 | 360 | from enum import Enum |
|
361 | 361 | class TestEnum(Enum): |
|
362 | 362 | A = 'a' |
|
363 | 363 | B = 'added' |
|
364 | 364 | """ |
|
365 | 365 | self.write_file(mod_fn, textwrap.dedent(new_code)) |
|
366 | 366 | self.shell.run_code("assert func1() == 'changed'") |
|
367 | 367 | self.shell.run_code("assert func2() == 'changed'") |
|
368 | 368 | self.shell.run_code("t = Test(); assert t.new_func() == 'changed'") |
|
369 | 369 | self.shell.run_code("assert number == 1") |
|
370 | if sys.version_info < (3, 12): | |
|
370 | 371 | self.shell.run_code("assert TestEnum.B.value == 'added'") |
|
371 | 372 | |
|
372 | 373 | # ----------- TEST IMPORT FROM MODULE -------------------------- |
|
373 | 374 | |
|
374 | 375 | new_mod_code = """ |
|
375 | 376 | from enum import Enum |
|
376 | 377 | class Ext(Enum): |
|
377 | 378 | A = 'ext' |
|
378 | 379 | def ext_func(): |
|
379 | 380 | return 'ext' |
|
380 | 381 | class ExtTest: |
|
381 | 382 | def meth(self): |
|
382 | 383 | return 'ext' |
|
383 | 384 | ext_int = 2 |
|
384 | 385 | """ |
|
385 | 386 | new_mod_name, new_mod_fn = self.new_module(textwrap.dedent(new_mod_code)) |
|
386 | 387 | current_mod_code = f""" |
|
387 | 388 | from {new_mod_name} import * |
|
388 | 389 | """ |
|
389 | 390 | self.write_file(mod_fn, textwrap.dedent(current_mod_code)) |
|
390 | 391 | self.shell.run_code("assert Ext.A.value == 'ext'") |
|
391 | 392 | self.shell.run_code("assert ext_func() == 'ext'") |
|
392 | 393 | self.shell.run_code("t = ExtTest(); assert t.meth() == 'ext'") |
|
393 | 394 | self.shell.run_code("assert ext_int == 2") |
|
394 | 395 | |
|
395 | 396 | def _check_smoketest(self, use_aimport=True): |
|
396 | 397 | """ |
|
397 | 398 | Functional test for the automatic reloader using either |
|
398 | 399 | '%autoreload 1' or '%autoreload 2' |
|
399 | 400 | """ |
|
400 | 401 | |
|
401 | 402 | mod_name, mod_fn = self.new_module( |
|
402 | 403 | """ |
|
403 | 404 | x = 9 |
|
404 | 405 | |
|
405 | 406 | z = 123 # this item will be deleted |
|
406 | 407 | |
|
407 | 408 | def foo(y): |
|
408 | 409 | return y + 3 |
|
409 | 410 | |
|
410 | 411 | class Baz(object): |
|
411 | 412 | def __init__(self, x): |
|
412 | 413 | self.x = x |
|
413 | 414 | def bar(self, y): |
|
414 | 415 | return self.x + y |
|
415 | 416 | @property |
|
416 | 417 | def quux(self): |
|
417 | 418 | return 42 |
|
418 | 419 | def zzz(self): |
|
419 | 420 | '''This method will be deleted below''' |
|
420 | 421 | return 99 |
|
421 | 422 | |
|
422 | 423 | class Bar: # old-style class: weakref doesn't work for it on Python < 2.7 |
|
423 | 424 | def foo(self): |
|
424 | 425 | return 1 |
|
425 | 426 | """ |
|
426 | 427 | ) |
|
427 | 428 | |
|
428 | 429 | # |
|
429 | 430 | # Import module, and mark for reloading |
|
430 | 431 | # |
|
431 | 432 | if use_aimport: |
|
432 | 433 | self.shell.magic_autoreload("1") |
|
433 | 434 | self.shell.magic_aimport(mod_name) |
|
434 | 435 | stream = StringIO() |
|
435 | 436 | self.shell.magic_aimport("", stream=stream) |
|
436 | 437 | self.assertIn(("Modules to reload:\n%s" % mod_name), stream.getvalue()) |
|
437 | 438 | |
|
438 | 439 | with self.assertRaises(ImportError): |
|
439 | 440 | self.shell.magic_aimport("tmpmod_as318989e89ds") |
|
440 | 441 | else: |
|
441 | 442 | self.shell.magic_autoreload("2") |
|
442 | 443 | self.shell.run_code("import %s" % mod_name) |
|
443 | 444 | stream = StringIO() |
|
444 | 445 | self.shell.magic_aimport("", stream=stream) |
|
445 | 446 | self.assertTrue( |
|
446 | 447 | "Modules to reload:\nall-except-skipped" in stream.getvalue() |
|
447 | 448 | ) |
|
448 | 449 | self.assertIn(mod_name, self.shell.ns) |
|
449 | 450 | |
|
450 | 451 | mod = sys.modules[mod_name] |
|
451 | 452 | |
|
452 | 453 | # |
|
453 | 454 | # Test module contents |
|
454 | 455 | # |
|
455 | 456 | old_foo = mod.foo |
|
456 | 457 | old_obj = mod.Baz(9) |
|
457 | 458 | old_obj2 = mod.Bar() |
|
458 | 459 | |
|
459 | 460 | def check_module_contents(): |
|
460 | 461 | self.assertEqual(mod.x, 9) |
|
461 | 462 | self.assertEqual(mod.z, 123) |
|
462 | 463 | |
|
463 | 464 | self.assertEqual(old_foo(0), 3) |
|
464 | 465 | self.assertEqual(mod.foo(0), 3) |
|
465 | 466 | |
|
466 | 467 | obj = mod.Baz(9) |
|
467 | 468 | self.assertEqual(old_obj.bar(1), 10) |
|
468 | 469 | self.assertEqual(obj.bar(1), 10) |
|
469 | 470 | self.assertEqual(obj.quux, 42) |
|
470 | 471 | self.assertEqual(obj.zzz(), 99) |
|
471 | 472 | |
|
472 | 473 | obj2 = mod.Bar() |
|
473 | 474 | self.assertEqual(old_obj2.foo(), 1) |
|
474 | 475 | self.assertEqual(obj2.foo(), 1) |
|
475 | 476 | |
|
476 | 477 | check_module_contents() |
|
477 | 478 | |
|
478 | 479 | # |
|
479 | 480 | # Simulate a failed reload: no reload should occur and exactly |
|
480 | 481 | # one error message should be printed |
|
481 | 482 | # |
|
482 | 483 | self.write_file( |
|
483 | 484 | mod_fn, |
|
484 | 485 | """ |
|
485 | 486 | a syntax error |
|
486 | 487 | """, |
|
487 | 488 | ) |
|
488 | 489 | |
|
489 | 490 | with tt.AssertPrints( |
|
490 | 491 | ("[autoreload of %s failed:" % mod_name), channel="stderr" |
|
491 | 492 | ): |
|
492 | 493 | self.shell.run_code("pass") # trigger reload |
|
493 | 494 | with tt.AssertNotPrints( |
|
494 | 495 | ("[autoreload of %s failed:" % mod_name), channel="stderr" |
|
495 | 496 | ): |
|
496 | 497 | self.shell.run_code("pass") # trigger another reload |
|
497 | 498 | check_module_contents() |
|
498 | 499 | |
|
499 | 500 | # |
|
500 | 501 | # Rewrite module (this time reload should succeed) |
|
501 | 502 | # |
|
502 | 503 | self.write_file( |
|
503 | 504 | mod_fn, |
|
504 | 505 | """ |
|
505 | 506 | x = 10 |
|
506 | 507 | |
|
507 | 508 | def foo(y): |
|
508 | 509 | return y + 4 |
|
509 | 510 | |
|
510 | 511 | class Baz(object): |
|
511 | 512 | def __init__(self, x): |
|
512 | 513 | self.x = x |
|
513 | 514 | def bar(self, y): |
|
514 | 515 | return self.x + y + 1 |
|
515 | 516 | @property |
|
516 | 517 | def quux(self): |
|
517 | 518 | return 43 |
|
518 | 519 | |
|
519 | 520 | class Bar: # old-style class |
|
520 | 521 | def foo(self): |
|
521 | 522 | return 2 |
|
522 | 523 | """, |
|
523 | 524 | ) |
|
524 | 525 | |
|
525 | 526 | def check_module_contents(): |
|
526 | 527 | self.assertEqual(mod.x, 10) |
|
527 | 528 | self.assertFalse(hasattr(mod, "z")) |
|
528 | 529 | |
|
529 | 530 | self.assertEqual(old_foo(0), 4) # superreload magic! |
|
530 | 531 | self.assertEqual(mod.foo(0), 4) |
|
531 | 532 | |
|
532 | 533 | obj = mod.Baz(9) |
|
533 | 534 | self.assertEqual(old_obj.bar(1), 11) # superreload magic! |
|
534 | 535 | self.assertEqual(obj.bar(1), 11) |
|
535 | 536 | |
|
536 | 537 | self.assertEqual(old_obj.quux, 43) |
|
537 | 538 | self.assertEqual(obj.quux, 43) |
|
538 | 539 | |
|
539 | 540 | self.assertFalse(hasattr(old_obj, "zzz")) |
|
540 | 541 | self.assertFalse(hasattr(obj, "zzz")) |
|
541 | 542 | |
|
542 | 543 | obj2 = mod.Bar() |
|
543 | 544 | self.assertEqual(old_obj2.foo(), 2) |
|
544 | 545 | self.assertEqual(obj2.foo(), 2) |
|
545 | 546 | |
|
546 | 547 | self.shell.run_code("pass") # trigger reload |
|
547 | 548 | check_module_contents() |
|
548 | 549 | |
|
549 | 550 | # |
|
550 | 551 | # Another failure case: deleted file (shouldn't reload) |
|
551 | 552 | # |
|
552 | 553 | os.unlink(mod_fn) |
|
553 | 554 | |
|
554 | 555 | self.shell.run_code("pass") # trigger reload |
|
555 | 556 | check_module_contents() |
|
556 | 557 | |
|
557 | 558 | # |
|
558 | 559 | # Disable autoreload and rewrite module: no reload should occur |
|
559 | 560 | # |
|
560 | 561 | if use_aimport: |
|
561 | 562 | self.shell.magic_aimport("-" + mod_name) |
|
562 | 563 | stream = StringIO() |
|
563 | 564 | self.shell.magic_aimport("", stream=stream) |
|
564 | 565 | self.assertTrue(("Modules to skip:\n%s" % mod_name) in stream.getvalue()) |
|
565 | 566 | |
|
566 | 567 | # This should succeed, although no such module exists |
|
567 | 568 | self.shell.magic_aimport("-tmpmod_as318989e89ds") |
|
568 | 569 | else: |
|
569 | 570 | self.shell.magic_autoreload("0") |
|
570 | 571 | |
|
571 | 572 | self.write_file( |
|
572 | 573 | mod_fn, |
|
573 | 574 | """ |
|
574 | 575 | x = -99 |
|
575 | 576 | """, |
|
576 | 577 | ) |
|
577 | 578 | |
|
578 | 579 | self.shell.run_code("pass") # trigger reload |
|
579 | 580 | self.shell.run_code("pass") |
|
580 | 581 | check_module_contents() |
|
581 | 582 | |
|
582 | 583 | # |
|
583 | 584 | # Re-enable autoreload: reload should now occur |
|
584 | 585 | # |
|
585 | 586 | if use_aimport: |
|
586 | 587 | self.shell.magic_aimport(mod_name) |
|
587 | 588 | else: |
|
588 | 589 | self.shell.magic_autoreload("") |
|
589 | 590 | |
|
590 | 591 | self.shell.run_code("pass") # trigger reload |
|
591 | 592 | self.assertEqual(mod.x, -99) |
|
592 | 593 | |
|
593 | 594 | def test_smoketest_aimport(self): |
|
594 | 595 | self._check_smoketest(use_aimport=True) |
|
595 | 596 | |
|
596 | 597 | def test_smoketest_autoreload(self): |
|
597 | 598 | self._check_smoketest(use_aimport=False) |
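For context, the FakeShell fixture's magic_autoreload and magic_aimport helpers above stand in for the following interactive workflow (a minimal sketch; "mymodule" is a placeholder name):

# In a real IPython session -- minimal sketch, "mymodule" is a placeholder.
%load_ext autoreload

%autoreload 1        # reload only modules registered with %aimport
%aimport mymodule    # mark mymodule for reloading
%aimport -mymodule   # ...or exclude it again

%autoreload 2        # reload all imported modules before executing code
%autoreload 3        # like 2, but also load newly added top-level objects
%autoreload 0        # disable automatic reloading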
|