@@ -1,1032 +1,1031 @@
# -*- coding: utf-8 -*-
"""Tools for inspecting Python objects.

Uses syntax highlighting for presenting the various information elements.

Similar in spirit to the inspect module, but all calls take a name argument to
reference the name under which an object is being read.
"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

__all__ = ['Inspector','InspectColors']

# stdlib modules
import ast
import inspect
from inspect import signature
import linecache
import warnings
import os
from textwrap import dedent
import types
import io as stdlib_io

from typing import Union

# IPython's own
from IPython.core import page
from IPython.lib.pretty import pretty
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import PyColorize
from IPython.utils import openpy
from IPython.utils import py3compat
from IPython.utils.dir2 import safe_hasattr
from IPython.utils.path import compress_user
from IPython.utils.text import indent
from IPython.utils.wildcard import list_namespace
from IPython.utils.wildcard import typestr2type
from IPython.utils.coloransi import TermColors, ColorScheme, ColorSchemeTable
from IPython.utils.py3compat import cast_unicode
from IPython.utils.colorable import Colorable
from IPython.utils.decorators import undoc

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

def pylight(code):
    return highlight(code, PythonLexer(), HtmlFormatter(noclasses=True))

# builtin docstrings to ignore
_func_call_docstring = types.FunctionType.__call__.__doc__
_object_init_docstring = object.__init__.__doc__
_builtin_type_docstrings = {
    inspect.getdoc(t) for t in (types.ModuleType, types.MethodType,
                                types.FunctionType, property)
}

_builtin_func_type = type(all)
_builtin_meth_type = type(str.upper)  # Bound methods have the same type as builtin functions
#****************************************************************************
# Builtin color schemes

Colors = TermColors  # just a shorthand

InspectColors = PyColorize.ANSICodeColors

#****************************************************************************
# Auxiliary functions and objects

# See the messaging spec for the definition of all these fields.  This list
# effectively defines the order of display
info_fields = ['type_name', 'base_class', 'string_form', 'namespace',
               'length', 'file', 'definition', 'docstring', 'source',
               'init_definition', 'class_docstring', 'init_docstring',
               'call_def', 'call_docstring',
               # These won't be printed but will be used to determine how to
               # format the object
               'ismagic', 'isalias', 'isclass', 'found', 'name'
               ]


def object_info(**kw):
    """Make an object info dict with all fields present."""
    infodict = {k:None for k in info_fields}
    infodict.update(kw)
    return infodict
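
# Editorial usage sketch (not part of the IPython source): object_info()
# guarantees that every key listed in `info_fields` is present, which is what
# the "object not found" path of the inspector relies on.
#
#   >>> missing = object_info(name='spam', found=False)
#   >>> missing['found'], missing['docstring']
#   (False, None)
#   >>> set(info_fields) <= set(missing)
#   True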


def get_encoding(obj):
    """Get encoding for python source file defining obj

    Returns None if obj is not defined in a sourcefile.
    """
    ofile = find_file(obj)
    # run contents of file through pager starting at line where the object
    # is defined, as long as the file isn't binary and is actually on the
    # filesystem.
    if ofile is None:
        return None
    elif ofile.endswith(('.so', '.dll', '.pyd')):
        return None
    elif not os.path.isfile(ofile):
        return None
    else:
        # Print only text files, not extension binaries.  Note that
        # getsourcelines returns lineno with 1-offset and page() uses
        # 0-offset, so we must adjust.
        with stdlib_io.open(ofile, 'rb') as buffer:   # Tweaked to use io.open for Python 2
            encoding, lines = openpy.detect_encoding(buffer.readline)
        return encoding

def getdoc(obj) -> Union[str,None]:
    """Stable wrapper around inspect.getdoc.

    This can't crash because of attribute problems.

    It also attempts to call a getdoc() method on the given object.  This
    allows objects which provide their docstrings via non-standard mechanisms
    (like Pyro proxies) to still be inspected by ipython's ? system.
    """
    # Allow objects to offer customized documentation via a getdoc method:
    try:
        ds = obj.getdoc()
    except Exception:
        pass
    else:
        if isinstance(ds, str):
            return inspect.cleandoc(ds)
    docstr = inspect.getdoc(obj)
    return docstr
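
# Editorial usage sketch (not part of the IPython source): objects may expose
# their documentation through a getdoc() method (as Pyro proxies do); the
# getdoc() wrapper above prefers that over inspect.getdoc and cleans the
# result with inspect.cleandoc().
#
#   >>> class Proxy:
#   ...     """local docstring"""
#   ...     def getdoc(self):
#   ...         return "    docstring fetched remotely"
#   >>> getdoc(Proxy())
#   'docstring fetched remotely'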


def getsource(obj, oname='') -> Union[str,None]:
    """Wrapper around inspect.getsource.

    This can be modified by other projects to provide customized source
    extraction.

    Parameters
    ----------
    obj : object
        an object whose source code we will attempt to extract
    oname : str
        (optional) a name under which the object is known

    Returns
    -------
    src : unicode or None

    """

    if isinstance(obj, property):
        sources = []
        for attrname in ['fget', 'fset', 'fdel']:
            fn = getattr(obj, attrname)
            if fn is not None:
                encoding = get_encoding(fn)
                oname_prefix = ('%s.' % oname) if oname else ''
                sources.append(''.join(('# ', oname_prefix, attrname)))
                if inspect.isfunction(fn):
                    sources.append(dedent(getsource(fn)))
                else:
                    # Default str/repr only prints function name,
                    # pretty.pretty prints module name too.
                    sources.append(
                        '%s%s = %s\n' % (oname_prefix, attrname, pretty(fn))
                    )
        if sources:
            return '\n'.join(sources)
        else:
            return None

    else:
        # Get source for non-property objects.

        obj = _get_wrapped(obj)

        try:
            src = inspect.getsource(obj)
        except TypeError:
            # The object itself provided no meaningful source, try looking for
            # its class definition instead.
            if hasattr(obj, '__class__'):
                try:
                    src = inspect.getsource(obj.__class__)
                except TypeError:
                    return None

        return src


def is_simple_callable(obj):
    """True if obj is a function, method, or a builtin callable."""
    return (inspect.isfunction(obj) or inspect.ismethod(obj) or \
            isinstance(obj, _builtin_func_type) or isinstance(obj, _builtin_meth_type))

@undoc
def getargspec(obj):
    """Wrapper around :func:`inspect.getfullargspec`
-    :func:inspect.getargspec` on Python 2.

    In addition to functions and methods, this can also handle objects with a
    ``__call__`` attribute.

    DEPRECATED: Deprecated since 7.10. Do not use, will be removed.
    """

    warnings.warn('`getargspec` function is deprecated as of IPython 7.10 '
                  'and will be removed in future versions.', DeprecationWarning, stacklevel=2)

    if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
        obj = obj.__call__

    return inspect.getfullargspec(obj)

@undoc
def format_argspec(argspec):
    """Format argspec; convenience wrapper around inspect's.

    This takes a dict instead of ordered arguments and calls
    inspect.format_argspec with the arguments in the necessary order.

    DEPRECATED: Do not use; will be removed in future versions.
    """

    warnings.warn('`format_argspec` function is deprecated as of IPython 7.10 '
                  'and will be removed in future versions.', DeprecationWarning, stacklevel=2)


    return inspect.formatargspec(argspec['args'], argspec['varargs'],
                                 argspec['varkw'], argspec['defaults'])

@undoc
def call_tip(oinfo, format_call=True):
    """DEPRECATED. Extract call tip data from an oinfo dict.
    """
    warnings.warn('`call_tip` function is deprecated as of IPython 6.0 '
                  'and will be removed in future versions.', DeprecationWarning, stacklevel=2)
    # Get call definition
    argspec = oinfo.get('argspec')
    if argspec is None:
        call_line = None
    else:
        # Callable objects will have 'self' as their first argument, prune
        # it out if it's there for clarity (since users do *not* pass an
        # extra first argument explicitly).
        try:
            has_self = argspec['args'][0] == 'self'
        except (KeyError, IndexError):
            pass
        else:
            if has_self:
                argspec['args'] = argspec['args'][1:]

        call_line = oinfo['name']+format_argspec(argspec)

    # Now get docstring.
    # The priority is: call docstring, constructor docstring, main one.
    doc = oinfo.get('call_docstring')
    if doc is None:
        doc = oinfo.get('init_docstring')
    if doc is None:
        doc = oinfo.get('docstring','')

    return call_line, doc


def _get_wrapped(obj):
    """Get the original object if wrapped in one or more @decorators

    Some objects automatically construct similar objects on any unrecognised
    attribute access (e.g. unittest.mock.call). To protect against infinite loops,
    this will arbitrarily cut off after 100 levels of obj.__wrapped__
    attribute access. --TK, Jan 2016
    """
    orig_obj = obj
    i = 0
    while safe_hasattr(obj, '__wrapped__'):
        obj = obj.__wrapped__
        i += 1
        if i > 100:
            # __wrapped__ is probably a lie, so return the thing we started with
            return orig_obj
    return obj
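
# Editorial usage sketch (not part of the IPython source): functools.wraps
# records the original function on ``__wrapped__``, which is the chain
# _get_wrapped() follows, subject to the 100-level safety cut-off above.
#
#   >>> import functools
#   >>> def noisy(fn):
#   ...     @functools.wraps(fn)
#   ...     def wrapper(*args, **kwargs):
#   ...         print('calling', fn.__name__)
#   ...         return fn(*args, **kwargs)
#   ...     return wrapper
#   >>> @noisy
#   ... def add(a, b):
#   ...     return a + b
#   >>> _get_wrapped(add) is add.__wrapped__
#   True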

def find_file(obj) -> str:
    """Find the absolute path to the file where an object was defined.

    This is essentially a robust wrapper around `inspect.getabsfile`.

    Returns None if no file can be found.

    Parameters
    ----------
    obj : any Python object

    Returns
    -------
    fname : str
        The absolute path to the file where the object was defined.
    """
    obj = _get_wrapped(obj)

    fname = None
    try:
        fname = inspect.getabsfile(obj)
    except TypeError:
        # For an instance, the file that matters is where its class was
        # declared.
        if hasattr(obj, '__class__'):
            try:
                fname = inspect.getabsfile(obj.__class__)
            except TypeError:
                # Can happen for builtins
                pass
    except:
        pass
    return cast_unicode(fname)


def find_source_lines(obj):
    """Find the line number in a file where an object was defined.

    This is essentially a robust wrapper around `inspect.getsourcelines`.

    Returns None if no file can be found.

    Parameters
    ----------
    obj : any Python object

    Returns
    -------
    lineno : int
        The line number where the object definition starts.
    """
    obj = _get_wrapped(obj)

    try:
        try:
            lineno = inspect.getsourcelines(obj)[1]
        except TypeError:
            # For instances, try the class object like getsource() does
            if hasattr(obj, '__class__'):
                lineno = inspect.getsourcelines(obj.__class__)[1]
            else:
                lineno = None
    except:
        return None

    return lineno
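
# Editorial usage sketch (not part of the IPython source): for an instance,
# both helpers fall back to the class that defines it, so they report where
# the class lives instead of failing with a TypeError.
#
#   >>> from textwrap import TextWrapper
#   >>> w = TextWrapper()
#   >>> find_file(w) == find_file(TextWrapper)      # instance falls back to its class
#   True
#   >>> find_source_lines(TextWrapper) is not None  # line where the class is defined
#   True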

class Inspector(Colorable):

    def __init__(self, color_table=InspectColors,
                 code_color_table=PyColorize.ANSICodeColors,
                 scheme=None,
                 str_detail_level=0,
                 parent=None, config=None):
        super(Inspector, self).__init__(parent=parent, config=config)
        self.color_table = color_table
        self.parser = PyColorize.Parser(out='str', parent=self, style=scheme)
        self.format = self.parser.format
        self.str_detail_level = str_detail_level
        self.set_active_scheme(scheme)

    def _getdef(self,obj,oname='') -> Union[str,None]:
        """Return the call signature for any callable object.

        If any exception is generated, None is returned instead and the
        exception is suppressed."""
        try:
            return _render_signature(signature(obj), oname)
        except:
            return None

    def __head(self,h) -> str:
        """Return a header string with proper colors."""
        return '%s%s%s' % (self.color_table.active_colors.header,h,
                           self.color_table.active_colors.normal)

    def set_active_scheme(self, scheme):
        if scheme is not None:
            self.color_table.set_active_scheme(scheme)
            self.parser.color_table.set_active_scheme(scheme)

    def noinfo(self, msg, oname):
        """Generic message when no information is found."""
        print('No %s found' % msg, end=' ')
        if oname:
            print('for %s' % oname)
        else:
            print()

    def pdef(self, obj, oname=''):
        """Print the call signature for any callable object.

        If the object is a class, print the constructor information."""

        if not callable(obj):
            print('Object is not callable.')
            return

        header = ''

        if inspect.isclass(obj):
            header = self.__head('Class constructor information:\n')


        output = self._getdef(obj,oname)
        if output is None:
            self.noinfo('definition header',oname)
        else:
            print(header,self.format(output), end=' ')

    # In Python 3, all classes are new-style, so they all have __init__.
    @skip_doctest
    def pdoc(self, obj, oname='', formatter=None):
        """Print the docstring for any object.

        Optional:
        -formatter: a function to run the docstring through for specially
        formatted docstrings.

        Examples
        --------

        In [1]: class NoInit:
           ...:     pass

        In [2]: class NoDoc:
           ...:     def __init__(self):
           ...:         pass

        In [3]: %pdoc NoDoc
        No documentation found for NoDoc

        In [4]: %pdoc NoInit
        No documentation found for NoInit

        In [5]: obj = NoInit()

        In [6]: %pdoc obj
        No documentation found for obj

        In [5]: obj2 = NoDoc()

        In [6]: %pdoc obj2
        No documentation found for obj2
        """

        head = self.__head # For convenience
        lines = []
        ds = getdoc(obj)
        if formatter:
            ds = formatter(ds).get('plain/text', ds)
        if ds:
            lines.append(head("Class docstring:"))
            lines.append(indent(ds))
        if inspect.isclass(obj) and hasattr(obj, '__init__'):
            init_ds = getdoc(obj.__init__)
            if init_ds is not None:
                lines.append(head("Init docstring:"))
                lines.append(indent(init_ds))
        elif hasattr(obj,'__call__'):
            call_ds = getdoc(obj.__call__)
            if call_ds:
                lines.append(head("Call docstring:"))
                lines.append(indent(call_ds))

        if not lines:
            self.noinfo('documentation',oname)
        else:
            page.page('\n'.join(lines))

    def psource(self, obj, oname=''):
        """Print the source code for an object."""

        # Flush the source cache because inspect can return out-of-date source
        linecache.checkcache()
        try:
            src = getsource(obj, oname=oname)
        except Exception:
            src = None

        if src is None:
            self.noinfo('source', oname)
        else:
            page.page(self.format(src))

    def pfile(self, obj, oname=''):
        """Show the whole file where an object was defined."""

        lineno = find_source_lines(obj)
        if lineno is None:
            self.noinfo('file', oname)
            return

        ofile = find_file(obj)
        # run contents of file through pager starting at line where the object
        # is defined, as long as the file isn't binary and is actually on the
        # filesystem.
        if ofile.endswith(('.so', '.dll', '.pyd')):
            print('File %r is binary, not printing.' % ofile)
        elif not os.path.isfile(ofile):
            print('File %r does not exist, not printing.' % ofile)
        else:
            # Print only text files, not extension binaries.  Note that
            # getsourcelines returns lineno with 1-offset and page() uses
            # 0-offset, so we must adjust.
            page.page(self.format(openpy.read_py_file(ofile, skip_encoding_cookie=False)), lineno - 1)


    def _mime_format(self, text:str, formatter=None) -> dict:
        """Return a mime bundle representation of the input text.

        - if `formatter` is None, the returned mime bundle has
           a `text/plain` field, with the input text.
           a `text/html` field with a `<pre>` tag containing the input text.

        - if `formatter` is not None, it must be a callable transforming the
          input text into a mime bundle. Default values for `text/plain` and
          `text/html` representations are the ones described above.

        Note:

        Formatters returning strings are supported but this behavior is deprecated.

        """
        defaults = {
            'text/plain': text,
            'text/html': '<pre>' + text + '</pre>'
        }

        if formatter is None:
            return defaults
        else:
            formatted = formatter(text)

            if not isinstance(formatted, dict):
                # Handle the deprecated behavior of a formatter returning
                # a string instead of a mime bundle.
                return {
                    'text/plain': formatted,
                    'text/html': '<pre>' + formatted + '</pre>'
                }

            else:
                return dict(defaults, **formatted)
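
    # Editorial usage sketch (not part of the IPython source): a formatter
    # returning a partial mime bundle is merged over the plain-text/HTML
    # defaults, while a formatter returning a bare string hits the deprecated
    # branch above.
    #
    #   >>> insp = Inspector()
    #   >>> def md_formatter(text):
    #   ...     return {'text/markdown': '**' + text + '**'}
    #   >>> bundle = insp._mime_format('hi', md_formatter)
    #   >>> sorted(bundle)
    #   ['text/html', 'text/markdown', 'text/plain']
    #   >>> bundle['text/plain']
    #   'hi'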


    def format_mime(self, bundle):

        text_plain = bundle['text/plain']

        text = ''
        heads, bodies = list(zip(*text_plain))
        _len = max(len(h) for h in heads)

        for head, body in zip(heads, bodies):
            body = body.strip('\n')
            delim = '\n' if '\n' in body else ' '
            text += self.__head(head+':') + (_len - len(head))*' ' +delim + body +'\n'

        bundle['text/plain'] = text
        return bundle

    def _get_info(self, obj, oname='', formatter=None, info=None, detail_level=0):
        """Retrieve an info dict and format it.

        Parameters
        ==========

        obj: any
            Object to inspect and return info from
        oname: str (default: ''):
            Name of the variable pointing to `obj`.
        formatter: callable
        info:
            already computed information
        detail_level: integer
            Granularity of detail level, if set to 1, give more information.
        """

        info = self._info(obj, oname=oname, info=info, detail_level=detail_level)

        _mime = {
            'text/plain': [],
            'text/html': '',
        }

        def append_field(bundle, title:str, key:str, formatter=None):
            field = info[key]
            if field is not None:
                formatted_field = self._mime_format(field, formatter)
                bundle['text/plain'].append((title, formatted_field['text/plain']))
                bundle['text/html'] += '<h1>' + title + '</h1>\n' + formatted_field['text/html'] + '\n'

        def code_formatter(text):
            return {
                'text/plain': self.format(text),
                'text/html': pylight(text)
            }

        if info['isalias']:
            append_field(_mime, 'Repr', 'string_form')

        elif info['ismagic']:
            if detail_level > 0:
                append_field(_mime, 'Source', 'source', code_formatter)
            else:
                append_field(_mime, 'Docstring', 'docstring', formatter)
            append_field(_mime, 'File', 'file')

        elif info['isclass'] or is_simple_callable(obj):
            # Functions, methods, classes
            append_field(_mime, 'Signature', 'definition', code_formatter)
            append_field(_mime, 'Init signature', 'init_definition', code_formatter)
            append_field(_mime, 'Docstring', 'docstring', formatter)
            if detail_level > 0 and info['source']:
                append_field(_mime, 'Source', 'source', code_formatter)
            else:
                append_field(_mime, 'Init docstring', 'init_docstring', formatter)

            append_field(_mime, 'File', 'file')
            append_field(_mime, 'Type', 'type_name')
            append_field(_mime, 'Subclasses', 'subclasses')

        else:
            # General Python objects
            append_field(_mime, 'Signature', 'definition', code_formatter)
            append_field(_mime, 'Call signature', 'call_def', code_formatter)
            append_field(_mime, 'Type', 'type_name')
            append_field(_mime, 'String form', 'string_form')

            # Namespace
            if info['namespace'] != 'Interactive':
                append_field(_mime, 'Namespace', 'namespace')

            append_field(_mime, 'Length', 'length')
            append_field(_mime, 'File', 'file')

            # Source or docstring, depending on detail level and whether
            # source found.
            if detail_level > 0 and info['source']:
                append_field(_mime, 'Source', 'source', code_formatter)
            else:
                append_field(_mime, 'Docstring', 'docstring', formatter)

            append_field(_mime, 'Class docstring', 'class_docstring', formatter)
            append_field(_mime, 'Init docstring', 'init_docstring', formatter)
            append_field(_mime, 'Call docstring', 'call_docstring', formatter)


        return self.format_mime(_mime)

    def pinfo(self, obj, oname='', formatter=None, info=None, detail_level=0, enable_html_pager=True):
        """Show detailed information about an object.

        Optional arguments:

        - oname: name of the variable pointing to the object.

        - formatter: callable (optional)
              A special formatter for docstrings.

              The formatter is a callable that takes a string as an input
              and returns either a formatted string or a mime type bundle
              in the form of a dictionary.

              Note that support for a custom formatter returning a string
              instead of a mime type bundle is deprecated.

        - info: a structure with some information fields which may have been
          precomputed already.

        - detail_level: if set to 1, more information is given.
        """
        info = self._get_info(obj, oname, formatter, info, detail_level)
        if not enable_html_pager:
            del info['text/html']
        page.page(info)

    def info(self, obj, oname='', formatter=None, info=None, detail_level=0):
        """DEPRECATED. Compute a dict with detailed information about an object.
        """
        if formatter is not None:
            warnings.warn('The `formatter` keyword argument to `Inspector.info` '
                          'is deprecated as of IPython 5.0 and will have no effect.',
                          DeprecationWarning, stacklevel=2)
        return self._info(obj, oname=oname, info=info, detail_level=detail_level)

    def _info(self, obj, oname='', info=None, detail_level=0) -> dict:
        """Compute a dict with detailed information about an object.

        Parameters
        ==========

        obj: any
            An object to find information about
        oname: str (default: ''):
            Name of the variable pointing to `obj`.
        info: (default: None)
            A struct (dict like with attr access) with some information fields
            which may have been precomputed already.
        detail_level: int (default:0)
            If set to 1, more information is given.

        Returns
        =======

        An object info dict with known fields from `info_fields`. Keys are
        strings, values are string or None.
        """

        if info is None:
            ismagic = False
            isalias = False
            ospace = ''
        else:
            ismagic = info.ismagic
            isalias = info.isalias
            ospace = info.namespace

        # Get docstring, special-casing aliases:
        if isalias:
            if not callable(obj):
                try:
                    ds = "Alias to the system command:\n %s" % obj[1]
                except:
                    ds = "Alias: " + str(obj)
            else:
                ds = "Alias to " + str(obj)
                if obj.__doc__:
                    ds += "\nDocstring:\n" + obj.__doc__
        else:
            ds = getdoc(obj)
            if ds is None:
                ds = '<no docstring>'

        # store output in a dict, we initialize it here and fill it as we go
        out = dict(name=oname, found=True, isalias=isalias, ismagic=ismagic, subclasses=None)

        string_max = 200 # max size of strings to show (snipped if longer)
        shalf = int((string_max - 5) / 2)

        if ismagic:
            out['type_name'] = 'Magic function'
        elif isalias:
            out['type_name'] = 'System alias'
        else:
            out['type_name'] = type(obj).__name__

        try:
            bclass = obj.__class__
            out['base_class'] = str(bclass)
        except:
            pass

        # String form, but snip if too long in ? form (full in ??)
        if detail_level >= self.str_detail_level:
            try:
                ostr = str(obj)
                str_head = 'string_form'
                if not detail_level and len(ostr)>string_max:
                    ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:]
                ostr = ("\n" + " " * len(str_head.expandtabs())).\
                        join(q.strip() for q in ostr.split("\n"))
                out[str_head] = ostr
            except:
                pass

        if ospace:
            out['namespace'] = ospace

        # Length (for strings and lists)
        try:
            out['length'] = str(len(obj))
        except Exception:
            pass

        # Filename where object was defined
        binary_file = False
        fname = find_file(obj)
        if fname is None:
            # if anything goes wrong, we don't want to show source, so it's as
            # if the file was binary
            binary_file = True
        else:
            if fname.endswith(('.so', '.dll', '.pyd')):
                binary_file = True
            elif fname.endswith('<string>'):
                fname = 'Dynamically generated function. No source code available.'
            out['file'] = compress_user(fname)

        # Original source code for a callable, class or property.
        if detail_level:
            # Flush the source cache because inspect can return out-of-date
            # source
            linecache.checkcache()
            try:
                if isinstance(obj, property) or not binary_file:
                    src = getsource(obj, oname)
                    if src is not None:
                        src = src.rstrip()
                    out['source'] = src

            except Exception:
                pass

        # Add docstring only if no source is to be shown (avoid repetitions).
        if ds and not self._source_contains_docstring(out.get('source'), ds):
            out['docstring'] = ds

        # Constructor docstring for classes
        if inspect.isclass(obj):
            out['isclass'] = True

            # get the init signature:
            try:
                init_def = self._getdef(obj, oname)
            except AttributeError:
                init_def = None

            # get the __init__ docstring
            try:
                obj_init = obj.__init__
            except AttributeError:
                init_ds = None
            else:
                if init_def is None:
                    # Get signature from init if top-level sig failed.
                    # Can happen for built-in types (list, etc.).
                    try:
                        init_def = self._getdef(obj_init, oname)
                    except AttributeError:
                        pass
                init_ds = getdoc(obj_init)
                # Skip Python's auto-generated docstrings
                if init_ds == _object_init_docstring:
|
841 | if init_ds == _object_init_docstring: | |
843 | init_ds = None |
|
842 | init_ds = None | |
844 |
|
843 | |||
845 | if init_def: |
|
844 | if init_def: | |
846 | out['init_definition'] = init_def |
|
845 | out['init_definition'] = init_def | |
847 |
|
846 | |||
848 | if init_ds: |
|
847 | if init_ds: | |
849 | out['init_docstring'] = init_ds |
|
848 | out['init_docstring'] = init_ds | |
850 |
|
849 | |||
851 | names = [sub.__name__ for sub in type.__subclasses__(obj)] |
|
850 | names = [sub.__name__ for sub in type.__subclasses__(obj)] | |
852 | if len(names) < 10: |
|
851 | if len(names) < 10: | |
853 | all_names = ', '.join(names) |
|
852 | all_names = ', '.join(names) | |
854 | else: |
|
853 | else: | |
855 | all_names = ', '.join(names[:10]+['...']) |
|
854 | all_names = ', '.join(names[:10]+['...']) | |
856 | out['subclasses'] = all_names |
|
855 | out['subclasses'] = all_names | |
857 | # and class docstring for instances: |
|
856 | # and class docstring for instances: | |
858 | else: |
|
857 | else: | |
859 | # reconstruct the function definition and print it: |
|
858 | # reconstruct the function definition and print it: | |
860 | defln = self._getdef(obj, oname) |
|
859 | defln = self._getdef(obj, oname) | |
861 | if defln: |
|
860 | if defln: | |
862 | out['definition'] = defln |
|
861 | out['definition'] = defln | |
863 |
|
862 | |||
864 | # First, check whether the instance docstring is identical to the |
|
863 | # First, check whether the instance docstring is identical to the | |
865 | # class one, and print it separately if they don't coincide. In |
|
864 | # class one, and print it separately if they don't coincide. In | |
866 | # most cases they will, but it's nice to print all the info for |
|
865 | # most cases they will, but it's nice to print all the info for | |
867 | # objects which use instance-customized docstrings. |
|
866 | # objects which use instance-customized docstrings. | |
868 | if ds: |
|
867 | if ds: | |
869 | try: |
|
868 | try: | |
870 | cls = getattr(obj,'__class__') |
|
869 | cls = getattr(obj,'__class__') | |
871 | except: |
|
870 | except: | |
872 | class_ds = None |
|
871 | class_ds = None | |
873 | else: |
|
872 | else: | |
874 | class_ds = getdoc(cls) |
|
873 | class_ds = getdoc(cls) | |
875 | # Skip Python's auto-generated docstrings |
|
874 | # Skip Python's auto-generated docstrings | |
876 | if class_ds in _builtin_type_docstrings: |
|
875 | if class_ds in _builtin_type_docstrings: | |
877 | class_ds = None |
|
876 | class_ds = None | |
878 | if class_ds and ds != class_ds: |
|
877 | if class_ds and ds != class_ds: | |
879 | out['class_docstring'] = class_ds |
|
878 | out['class_docstring'] = class_ds | |
880 |
|
879 | |||
881 | # Next, try to show constructor docstrings |
|
880 | # Next, try to show constructor docstrings | |
882 | try: |
|
881 | try: | |
883 | init_ds = getdoc(obj.__init__) |
|
882 | init_ds = getdoc(obj.__init__) | |
884 | # Skip Python's auto-generated docstrings |
|
883 | # Skip Python's auto-generated docstrings | |
885 | if init_ds == _object_init_docstring: |
|
884 | if init_ds == _object_init_docstring: | |
886 | init_ds = None |
|
885 | init_ds = None | |
887 | except AttributeError: |
|
886 | except AttributeError: | |
888 | init_ds = None |
|
887 | init_ds = None | |
889 | if init_ds: |
|
888 | if init_ds: | |
890 | out['init_docstring'] = init_ds |
|
889 | out['init_docstring'] = init_ds | |
891 |
|
890 | |||
892 | # Call form docstring for callable instances |
|
891 | # Call form docstring for callable instances | |
893 | if safe_hasattr(obj, '__call__') and not is_simple_callable(obj): |
|
892 | if safe_hasattr(obj, '__call__') and not is_simple_callable(obj): | |
894 | call_def = self._getdef(obj.__call__, oname) |
|
893 | call_def = self._getdef(obj.__call__, oname) | |
895 | if call_def and (call_def != out.get('definition')): |
|
894 | if call_def and (call_def != out.get('definition')): | |
896 | # it may never be the case that call def and definition differ, |
|
895 | # it may never be the case that call def and definition differ, | |
897 | # but don't include the same signature twice |
|
896 | # but don't include the same signature twice | |
898 | out['call_def'] = call_def |
|
897 | out['call_def'] = call_def | |
899 | call_ds = getdoc(obj.__call__) |
|
898 | call_ds = getdoc(obj.__call__) | |
900 | # Skip Python's auto-generated docstrings |
|
899 | # Skip Python's auto-generated docstrings | |
901 | if call_ds == _func_call_docstring: |
|
900 | if call_ds == _func_call_docstring: | |
902 | call_ds = None |
|
901 | call_ds = None | |
903 | if call_ds: |
|
902 | if call_ds: | |
904 | out['call_docstring'] = call_ds |
|
903 | out['call_docstring'] = call_ds | |
905 |
|
904 | |||
906 | return object_info(**out) |
|
905 | return object_info(**out) | |
907 |
|
906 | |||
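
The method ending above is what backs ``obj?`` / ``obj??``: it fills the ``out`` dict field by field (type name, base class, string form, length, file, source, the various docstrings) and hands it to ``object_info``. A minimal usage sketch, assuming ``Inspector`` still constructs with its defaults and that ``info`` returns the plain dict built here (``my_list`` is just an illustrative name)::

    from IPython.core.oinspect import Inspector

    inspector = Inspector()
    details = inspector.info([1, 2, 3], oname='my_list')
    print(details['type_name'])   # 'list'
    print(details['length'])      # '3'
    print(details['base_class'])  # "<class 'list'>"
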
908 | @staticmethod |
|
907 | @staticmethod | |
909 | def _source_contains_docstring(src, doc): |
|
908 | def _source_contains_docstring(src, doc): | |
910 | """ |
|
909 | """ | |
911 | Check whether the source *src* contains the docstring *doc*. |
|
910 | Check whether the source *src* contains the docstring *doc*. | |
912 |
|
911 | |||
913 | This is a helper function to skip displaying the docstring if the |
|
912 | This is a helper function to skip displaying the docstring if the | |
914 | source already contains it, avoiding repetition of information. |
|
913 | source already contains it, avoiding repetition of information. | |
915 | """ |
|
914 | """ | |
916 | try: |
|
915 | try: | |
917 | def_node, = ast.parse(dedent(src)).body |
|
916 | def_node, = ast.parse(dedent(src)).body | |
918 | return ast.get_docstring(def_node) == doc |
|
917 | return ast.get_docstring(def_node) == doc | |
919 | except Exception: |
|
918 | except Exception: | |
920 | # The source can become invalid or even non-existent (because it |
|
919 | # The source can become invalid or even non-existent (because it | |
921 | # is re-fetched from the source file) so the above code can fail in |
|
920 | # is re-fetched from the source file) so the above code can fail in | |
922 | # arbitrary ways. |
|
921 | # arbitrary ways. | |
923 | return False |
|
922 | return False | |
924 |
|
923 | |||
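
Concretely, the helper above parses the dedented source, expects exactly one top-level node, and compares that node's docstring with the one already extracted; only when they differ is the docstring shown separately. An illustrative sketch (the ``greet`` function is made up for this example)::

    import ast
    from textwrap import dedent

    src = """
        def greet(name):
            'Say hello.'
            return 'hello ' + name
    """

    # One top-level definition expected; compare its docstring with the known one.
    def_node, = ast.parse(dedent(src)).body
    print(ast.get_docstring(def_node) == 'Say hello.')   # True -> no duplicate docstring output
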
925 | def psearch(self,pattern,ns_table,ns_search=[], |
|
924 | def psearch(self,pattern,ns_table,ns_search=[], | |
926 | ignore_case=False,show_all=False, *, list_types=False): |
|
925 | ignore_case=False,show_all=False, *, list_types=False): | |
927 | """Search namespaces with wildcards for objects. |
|
926 | """Search namespaces with wildcards for objects. | |
928 |
|
927 | |||
929 | Arguments: |
|
928 | Arguments: | |
930 |
|
929 | |||
931 | - pattern: string containing shell-like wildcards to use in namespace |
|
930 | - pattern: string containing shell-like wildcards to use in namespace | |
932 | searches and optionally a type specification to narrow the search to |
|
931 | searches and optionally a type specification to narrow the search to | |
933 | objects of that type. |
|
932 | objects of that type. | |
934 |
|
933 | |||
935 | - ns_table: dict of name->namespaces for search. |
|
934 | - ns_table: dict of name->namespaces for search. | |
936 |
|
935 | |||
937 | Optional arguments: |
|
936 | Optional arguments: | |
938 |
|
937 | |||
939 | - ns_search: list of namespace names to include in search. |
|
938 | - ns_search: list of namespace names to include in search. | |
940 |
|
939 | |||
941 | - ignore_case(False): make the search case-insensitive. |
|
940 | - ignore_case(False): make the search case-insensitive. | |
942 |
|
941 | |||
943 | - show_all(False): show all names, including those starting with |
|
942 | - show_all(False): show all names, including those starting with | |
944 | underscores. |
|
943 | underscores. | |
945 |
|
944 | |||
946 | - list_types(False): list all available object types for object matching. |
|
945 | - list_types(False): list all available object types for object matching. | |
947 | """ |
|
946 | """ | |
948 | #print 'ps pattern:<%r>' % pattern # dbg |
|
947 | #print 'ps pattern:<%r>' % pattern # dbg | |
949 |
|
948 | |||
950 | # defaults |
|
949 | # defaults | |
951 | type_pattern = 'all' |
|
950 | type_pattern = 'all' | |
952 | filter = '' |
|
951 | filter = '' | |
953 |
|
952 | |||
954 | # list all object types |
|
953 | # list all object types | |
955 | if list_types: |
|
954 | if list_types: | |
956 | page.page('\n'.join(sorted(typestr2type))) |
|
955 | page.page('\n'.join(sorted(typestr2type))) | |
957 | return |
|
956 | return | |
958 |
|
957 | |||
959 | cmds = pattern.split() |
|
958 | cmds = pattern.split() | |
960 | len_cmds = len(cmds) |
|
959 | len_cmds = len(cmds) | |
961 | if len_cmds == 1: |
|
960 | if len_cmds == 1: | |
962 | # Only filter pattern given |
|
961 | # Only filter pattern given | |
963 | filter = cmds[0] |
|
962 | filter = cmds[0] | |
964 | elif len_cmds == 2: |
|
963 | elif len_cmds == 2: | |
965 | # Both filter and type specified |
|
964 | # Both filter and type specified | |
966 | filter,type_pattern = cmds |
|
965 | filter,type_pattern = cmds | |
967 | else: |
|
966 | else: | |
968 | raise ValueError('invalid argument string for psearch: <%s>' % |
|
967 | raise ValueError('invalid argument string for psearch: <%s>' % | |
969 | pattern) |
|
968 | pattern) | |
970 |
|
969 | |||
971 | # filter search namespaces |
|
970 | # filter search namespaces | |
972 | for name in ns_search: |
|
971 | for name in ns_search: | |
973 | if name not in ns_table: |
|
972 | if name not in ns_table: | |
974 | raise ValueError('invalid namespace <%s>. Valid names: %s' % |
|
973 | raise ValueError('invalid namespace <%s>. Valid names: %s' % | |
975 | (name,ns_table.keys())) |
|
974 | (name,ns_table.keys())) | |
976 |
|
975 | |||
977 | #print 'type_pattern:',type_pattern # dbg |
|
976 | #print 'type_pattern:',type_pattern # dbg | |
978 | search_result, namespaces_seen = set(), set() |
|
977 | search_result, namespaces_seen = set(), set() | |
979 | for ns_name in ns_search: |
|
978 | for ns_name in ns_search: | |
980 | ns = ns_table[ns_name] |
|
979 | ns = ns_table[ns_name] | |
981 | # Normally, locals and globals are the same, so we just check one. |
|
980 | # Normally, locals and globals are the same, so we just check one. | |
982 | if id(ns) in namespaces_seen: |
|
981 | if id(ns) in namespaces_seen: | |
983 | continue |
|
982 | continue | |
984 | namespaces_seen.add(id(ns)) |
|
983 | namespaces_seen.add(id(ns)) | |
985 | tmp_res = list_namespace(ns, type_pattern, filter, |
|
984 | tmp_res = list_namespace(ns, type_pattern, filter, | |
986 | ignore_case=ignore_case, show_all=show_all) |
|
985 | ignore_case=ignore_case, show_all=show_all) | |
987 | search_result.update(tmp_res) |
|
986 | search_result.update(tmp_res) | |
988 |
|
987 | |||
989 | page.page('\n'.join(sorted(search_result))) |
|
988 | page.page('\n'.join(sorted(search_result))) | |
990 |
|
989 | |||
991 |
|
990 | |||
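
``psearch`` is the engine behind ``%psearch`` and the wildcard search forms: the pattern splits into a shell-style name filter plus an optional type restriction, each requested namespace is scanned once through ``list_namespace``, and the sorted matches are paged. A hedged sketch of calling it directly, with a made-up ``user_ns`` dict and assuming ``Inspector`` constructs with its defaults::

    from IPython.core.oinspect import Inspector

    user_ns = {'alpha': 1, 'almost': 'a string', 'beta': 2.0}
    inspector = Inspector()
    # Names starting with "a", restricted to ints -> pages "alpha".
    inspector.psearch('a* int', ns_table={'user': user_ns}, ns_search=['user'])
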
992 | def _render_signature(obj_signature, obj_name) -> str: |
|
991 | def _render_signature(obj_signature, obj_name) -> str: | |
993 | """ |
|
992 | """ | |
994 | This was mostly taken from inspect.Signature.__str__. |
|
993 | This was mostly taken from inspect.Signature.__str__. | |
995 | Look there for the comments. |
|
994 | Look there for the comments. | |
996 | The only change is to add linebreaks when this gets too long. |
|
995 | The only change is to add linebreaks when this gets too long. | |
997 | """ |
|
996 | """ | |
998 | result = [] |
|
997 | result = [] | |
999 | pos_only = False |
|
998 | pos_only = False | |
1000 | kw_only = True |
|
999 | kw_only = True | |
1001 | for param in obj_signature.parameters.values(): |
|
1000 | for param in obj_signature.parameters.values(): | |
1002 | if param.kind == inspect._POSITIONAL_ONLY: |
|
1001 | if param.kind == inspect._POSITIONAL_ONLY: | |
1003 | pos_only = True |
|
1002 | pos_only = True | |
1004 | elif pos_only: |
|
1003 | elif pos_only: | |
1005 | result.append('/') |
|
1004 | result.append('/') | |
1006 | pos_only = False |
|
1005 | pos_only = False | |
1007 |
|
1006 | |||
1008 | if param.kind == inspect._VAR_POSITIONAL: |
|
1007 | if param.kind == inspect._VAR_POSITIONAL: | |
1009 | kw_only = False |
|
1008 | kw_only = False | |
1010 | elif param.kind == inspect._KEYWORD_ONLY and kw_only: |
|
1009 | elif param.kind == inspect._KEYWORD_ONLY and kw_only: | |
1011 | result.append('*') |
|
1010 | result.append('*') | |
1012 | kw_only = False |
|
1011 | kw_only = False | |
1013 |
|
1012 | |||
1014 | result.append(str(param)) |
|
1013 | result.append(str(param)) | |
1015 |
|
1014 | |||
1016 | if pos_only: |
|
1015 | if pos_only: | |
1017 | result.append('/') |
|
1016 | result.append('/') | |
1018 |
|
1017 | |||
1019 | # add up name, parameters, braces (2), and commas |
|
1018 | # add up name, parameters, braces (2), and commas | |
1020 | if len(obj_name) + sum(len(r) + 2 for r in result) > 75: |
|
1019 | if len(obj_name) + sum(len(r) + 2 for r in result) > 75: | |
1021 | # This doesn’t fit behind “Signature: ” in an inspect window. |
|
1020 | # This doesn’t fit behind “Signature: ” in an inspect window. | |
1022 | rendered = '{}(\n{})'.format(obj_name, ''.join( |
|
1021 | rendered = '{}(\n{})'.format(obj_name, ''.join( | |
1023 | ' {},\n'.format(r) for r in result) |
|
1022 | ' {},\n'.format(r) for r in result) | |
1024 | ) |
|
1023 | ) | |
1025 | else: |
|
1024 | else: | |
1026 | rendered = '{}({})'.format(obj_name, ', '.join(result)) |
|
1025 | rendered = '{}({})'.format(obj_name, ', '.join(result)) | |
1027 |
|
1026 | |||
1028 | if obj_signature.return_annotation is not inspect._empty: |
|
1027 | if obj_signature.return_annotation is not inspect._empty: | |
1029 | anno = inspect.formatannotation(obj_signature.return_annotation) |
|
1028 | anno = inspect.formatannotation(obj_signature.return_annotation) | |
1030 | rendered += ' -> {}'.format(anno) |
|
1029 | rendered += ' -> {}'.format(anno) | |
1031 |
|
1030 | |||
1032 | return rendered |
|
1031 | return rendered |
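
``_render_signature`` is what produces the ``Signature:`` line shown by ``?``, wrapping to one parameter per line once the rendered call would exceed roughly 75 characters. A short sketch with a made-up ``configure`` function, wide enough to trigger the wrapped form::

    import inspect
    from IPython.core.oinspect import _render_signature

    def configure(alpha=1, beta=2, gamma=3, delta=4,
                  epsilon=5, zeta=6, eta=7, theta=8) -> dict:
        return {}

    # Too wide for one line, so each parameter lands on its own line.
    print(_render_signature(inspect.signature(configure), 'configure'))
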
@@ -1,1506 +1,1503 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | """ |
|
2 | """ | |
3 | Verbose and colourful traceback formatting. |
|
3 | Verbose and colourful traceback formatting. | |
4 |
|
4 | |||
5 | **ColorTB** |
|
5 | **ColorTB** | |
6 |
|
6 | |||
7 | I've always found it a bit hard to visually parse tracebacks in Python. The |
|
7 | I've always found it a bit hard to visually parse tracebacks in Python. The | |
8 | ColorTB class is a solution to that problem. It colors the different parts of a |
|
8 | ColorTB class is a solution to that problem. It colors the different parts of a | |
9 | traceback in a manner similar to what you would expect from a syntax-highlighting |
|
9 | traceback in a manner similar to what you would expect from a syntax-highlighting | |
10 | text editor. |
|
10 | text editor. | |
11 |
|
11 | |||
12 | Installation instructions for ColorTB:: |
|
12 | Installation instructions for ColorTB:: | |
13 |
|
13 | |||
14 | import sys,ultratb |
|
14 | import sys,ultratb | |
15 | sys.excepthook = ultratb.ColorTB() |
|
15 | sys.excepthook = ultratb.ColorTB() | |
16 |
|
16 | |||
17 | **VerboseTB** |
|
17 | **VerboseTB** | |
18 |
|
18 | |||
19 | I've also included a port of Ka-Ping Yee's "cgitb.py" that produces all kinds |
|
19 | I've also included a port of Ka-Ping Yee's "cgitb.py" that produces all kinds | |
20 | of useful info when a traceback occurs. Ping originally had it spit out HTML |
|
20 | of useful info when a traceback occurs. Ping originally had it spit out HTML | |
21 | and intended it for CGI programmers, but why should they have all the fun? I |
|
21 | and intended it for CGI programmers, but why should they have all the fun? I | |
22 | altered it to spit out colored text to the terminal. It's a bit overwhelming, |
|
22 | altered it to spit out colored text to the terminal. It's a bit overwhelming, | |
23 | but kind of neat, and maybe useful for long-running programs that you believe |
|
23 | but kind of neat, and maybe useful for long-running programs that you believe | |
24 | are bug-free. If a crash *does* occur in that type of program you want details. |
|
24 | are bug-free. If a crash *does* occur in that type of program you want details. | |
25 | Give it a shot--you'll love it or you'll hate it. |
|
25 | Give it a shot--you'll love it or you'll hate it. | |
26 |
|
26 | |||
27 | .. note:: |
|
27 | .. note:: | |
28 |
|
28 | |||
29 | The Verbose mode prints the variables currently visible where the exception |
|
29 | The Verbose mode prints the variables currently visible where the exception | |
30 | happened (shortening their strings if too long). This can potentially be |
|
30 | happened (shortening their strings if too long). This can potentially be | |
31 | very slow, if you happen to have a huge data structure whose string |
|
31 | very slow, if you happen to have a huge data structure whose string | |
32 | representation is complex to compute. Your computer may appear to freeze for |
|
32 | representation is complex to compute. Your computer may appear to freeze for | |
33 | a while with cpu usage at 100%. If this occurs, you can cancel the traceback |
|
33 | a while with cpu usage at 100%. If this occurs, you can cancel the traceback | |
34 | with Ctrl-C (maybe hitting it more than once). |
|
34 | with Ctrl-C (maybe hitting it more than once). | |
35 |
|
35 | |||
36 | If you encounter this kind of situation often, you may want to use the |
|
36 | If you encounter this kind of situation often, you may want to use the | |
37 | Verbose_novars mode instead of the regular Verbose, which avoids formatting |
|
37 | Verbose_novars mode instead of the regular Verbose, which avoids formatting | |
38 | variables (but otherwise includes the information and context given by |
|
38 | variables (but otherwise includes the information and context given by | |
39 | Verbose). |
|
39 | Verbose). | |
40 |
|
40 | |||
41 | .. note:: |
|
41 | .. note:: | |
42 |
|
42 | |||
43 | The verbose mode prints all variables in the stack, which means it can |
|
43 | The verbose mode prints all variables in the stack, which means it can | |
44 | potentially leak sensitive information like access keys, or unencrypted |
|
44 | potentially leak sensitive information like access keys, or unencrypted | |
45 | passwords. |
|
45 | passwords. | |
46 |
|
46 | |||
47 | Installation instructions for VerboseTB:: |
|
47 | Installation instructions for VerboseTB:: | |
48 |
|
48 | |||
49 | import sys,ultratb |
|
49 | import sys,ultratb | |
50 | sys.excepthook = ultratb.VerboseTB() |
|
50 | sys.excepthook = ultratb.VerboseTB() | |
51 |
|
51 | |||
52 | Note: Much of the code in this module was lifted verbatim from the standard |
|
52 | Note: Much of the code in this module was lifted verbatim from the standard | |
53 | library module 'traceback.py' and Ka-Ping Yee's 'cgitb.py'. |
|
53 | library module 'traceback.py' and Ka-Ping Yee's 'cgitb.py'. | |
54 |
|
54 | |||
55 | Color schemes |
|
55 | Color schemes | |
56 | ------------- |
|
56 | ------------- | |
57 |
|
57 | |||
58 | The colors are defined in the class TBTools through the use of the |
|
58 | The colors are defined in the class TBTools through the use of the | |
59 | ColorSchemeTable class. Currently the following exist: |
|
59 | ColorSchemeTable class. Currently the following exist: | |
60 |
|
60 | |||
61 | - NoColor: allows all of this module to be used in any terminal (the color |
|
61 | - NoColor: allows all of this module to be used in any terminal (the color | |
62 | escapes are just dummy blank strings). |
|
62 | escapes are just dummy blank strings). | |
63 |
|
63 | |||
64 | - Linux: is meant to look good in a terminal like the Linux console (black |
|
64 | - Linux: is meant to look good in a terminal like the Linux console (black | |
65 | or very dark background). |
|
65 | or very dark background). | |
66 |
|
66 | |||
67 | - LightBG: similar to Linux but swaps dark/light colors to be more readable |
|
67 | - LightBG: similar to Linux but swaps dark/light colors to be more readable | |
68 | in light background terminals. |
|
68 | in light background terminals. | |
69 |
|
69 | |||
70 | - Neutral: a neutral color scheme that should be readable on both light and |
|
70 | - Neutral: a neutral color scheme that should be readable on both light and | |
71 | dark backgrounds. |
|
71 | dark backgrounds. | |
72 |
|
72 | |||
73 | You can implement other color schemes easily; the syntax is fairly |
|
73 | You can implement other color schemes easily; the syntax is fairly | |
74 | self-explanatory. Please send back new schemes you develop to the author for |
|
74 | self-explanatory. Please send back new schemes you develop to the author for | |
75 | possible inclusion in future releases. |
|
75 | possible inclusion in future releases. | |
76 |
|
76 | |||
77 | Inheritance diagram: |
|
77 | Inheritance diagram: | |
78 |
|
78 | |||
79 | .. inheritance-diagram:: IPython.core.ultratb |
|
79 | .. inheritance-diagram:: IPython.core.ultratb | |
80 | :parts: 3 |
|
80 | :parts: 3 | |
81 | """ |
|
81 | """ | |
82 |
|
82 | |||
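
The schemes listed in the docstring can also be picked when the hook is installed. A minimal sketch using the module as it ships inside IPython (the ``divide`` function exists only to trigger an uncaught exception)::

    import sys
    from IPython.core import ultratb

    # Verbose tracebacks tuned for a light background; 'Linux', 'Neutral'
    # and 'NoColor' are the other schemes described above.
    sys.excepthook = ultratb.VerboseTB(color_scheme='LightBG')

    def divide(a, b):
        return a / b

    divide(1, 0)   # the uncaught ZeroDivisionError is rendered by VerboseTB
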
83 | #***************************************************************************** |
|
83 | #***************************************************************************** | |
84 | # Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu> |
|
84 | # Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu> | |
85 | # Copyright (C) 2001-2004 Fernando Perez <fperez@colorado.edu> |
|
85 | # Copyright (C) 2001-2004 Fernando Perez <fperez@colorado.edu> | |
86 | # |
|
86 | # | |
87 | # Distributed under the terms of the BSD License. The full license is in |
|
87 | # Distributed under the terms of the BSD License. The full license is in | |
88 | # the file COPYING, distributed as part of this software. |
|
88 | # the file COPYING, distributed as part of this software. | |
89 | #***************************************************************************** |
|
89 | #***************************************************************************** | |
90 |
|
90 | |||
91 |
|
91 | |||
92 | import dis |
|
92 | import dis | |
93 | import inspect |
|
93 | import inspect | |
94 | import keyword |
|
94 | import keyword | |
95 | import linecache |
|
95 | import linecache | |
96 | import os |
|
96 | import os | |
97 | import pydoc |
|
97 | import pydoc | |
98 | import re |
|
98 | import re | |
99 | import sys |
|
99 | import sys | |
100 | import time |
|
100 | import time | |
101 | import tokenize |
|
101 | import tokenize | |
102 | import traceback |
|
102 | import traceback | |
103 |
|
103 | |||
104 | try: # Python 2 |
|
104 | from tokenize import generate_tokens | |
105 | generate_tokens = tokenize.generate_tokens |
|
|||
106 | except AttributeError: # Python 3 |
|
|||
107 | generate_tokens = tokenize.tokenize |
|
|||
108 |
|
105 | |||
109 | # For purposes of monkeypatching inspect to fix a bug in it. |
|
106 | # For purposes of monkeypatching inspect to fix a bug in it. | |
110 | from inspect import getsourcefile, getfile, getmodule, \ |
|
107 | from inspect import getsourcefile, getfile, getmodule, \ | |
111 | ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode |
|
108 | ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode | |
112 |
|
109 | |||
113 | # IPython's own modules |
|
110 | # IPython's own modules | |
114 | from IPython import get_ipython |
|
111 | from IPython import get_ipython | |
115 | from IPython.core import debugger |
|
112 | from IPython.core import debugger | |
116 | from IPython.core.display_trap import DisplayTrap |
|
113 | from IPython.core.display_trap import DisplayTrap | |
117 | from IPython.core.excolors import exception_colors |
|
114 | from IPython.core.excolors import exception_colors | |
118 | from IPython.utils import PyColorize |
|
115 | from IPython.utils import PyColorize | |
119 | from IPython.utils import path as util_path |
|
116 | from IPython.utils import path as util_path | |
120 | from IPython.utils import py3compat |
|
117 | from IPython.utils import py3compat | |
121 | from IPython.utils.data import uniq_stable |
|
118 | from IPython.utils.data import uniq_stable | |
122 | from IPython.utils.terminal import get_terminal_size |
|
119 | from IPython.utils.terminal import get_terminal_size | |
123 |
|
120 | |||
124 | from logging import info, error, debug |
|
121 | from logging import info, error, debug | |
125 |
|
122 | |||
126 | from importlib.util import source_from_cache |
|
123 | from importlib.util import source_from_cache | |
127 |
|
124 | |||
128 | import IPython.utils.colorable as colorable |
|
125 | import IPython.utils.colorable as colorable | |
129 |
|
126 | |||
130 | # Globals |
|
127 | # Globals | |
131 | # amount of space to put line numbers before verbose tracebacks |
|
128 | # amount of space to put line numbers before verbose tracebacks | |
132 | INDENT_SIZE = 8 |
|
129 | INDENT_SIZE = 8 | |
133 |
|
130 | |||
134 | # Default color scheme. This is used, for example, by the traceback |
|
131 | # Default color scheme. This is used, for example, by the traceback | |
135 | # formatter. When running in an actual IPython instance, the user's rc.colors |
|
132 | # formatter. When running in an actual IPython instance, the user's rc.colors | |
136 | # value is used, but having a module global makes this functionality available |
|
133 | # value is used, but having a module global makes this functionality available | |
137 | # to users of ultratb who are NOT running inside ipython. |
|
134 | # to users of ultratb who are NOT running inside ipython. | |
138 | DEFAULT_SCHEME = 'NoColor' |
|
135 | DEFAULT_SCHEME = 'NoColor' | |
139 |
|
136 | |||
140 |
|
137 | |||
141 | # Number of frames above which we are likely to have a recursion and will |
|
138 | # Number of frames above which we are likely to have a recursion and will | |
142 | # **attempt** to detect it. Made modifiable mostly to speed up the test suite |
|
139 | # **attempt** to detect it. Made modifiable mostly to speed up the test suite | |
143 | # as detecting recursion is one of our slowest tests |
|
140 | # as detecting recursion is one of our slowest tests | |
144 | _FRAME_RECURSION_LIMIT = 500 |
|
141 | _FRAME_RECURSION_LIMIT = 500 | |
145 |
|
142 | |||
146 | # --------------------------------------------------------------------------- |
|
143 | # --------------------------------------------------------------------------- | |
147 | # Code begins |
|
144 | # Code begins | |
148 |
|
145 | |||
149 | # Utility functions |
|
146 | # Utility functions | |
150 | def inspect_error(): |
|
147 | def inspect_error(): | |
151 | """Print a message about internal inspect errors. |
|
148 | """Print a message about internal inspect errors. | |
152 |
|
149 | |||
153 | These are unfortunately quite common.""" |
|
150 | These are unfortunately quite common.""" | |
154 |
|
151 | |||
155 | error('Internal Python error in the inspect module.\n' |
|
152 | error('Internal Python error in the inspect module.\n' | |
156 | 'Below is the traceback from this internal error.\n') |
|
153 | 'Below is the traceback from this internal error.\n') | |
157 |
|
154 | |||
158 |
|
155 | |||
159 | # This function is a monkeypatch we apply to the Python inspect module. We have |
|
156 | # This function is a monkeypatch we apply to the Python inspect module. We have | |
160 | # now found when it's needed (see discussion on issue gh-1456), and we have a |
|
157 | # now found when it's needed (see discussion on issue gh-1456), and we have a | |
161 | # test case (IPython.core.tests.test_ultratb.ChangedPyFileTest) that fails if |
|
158 | # test case (IPython.core.tests.test_ultratb.ChangedPyFileTest) that fails if | |
162 | # the monkeypatch is not applied. TK, Aug 2012. |
|
159 | # the monkeypatch is not applied. TK, Aug 2012. | |
163 | def findsource(object): |
|
160 | def findsource(object): | |
164 | """Return the entire source file and starting line number for an object. |
|
161 | """Return the entire source file and starting line number for an object. | |
165 |
|
162 | |||
166 | The argument may be a module, class, method, function, traceback, frame, |
|
163 | The argument may be a module, class, method, function, traceback, frame, | |
167 | or code object. The source code is returned as a list of all the lines |
|
164 | or code object. The source code is returned as a list of all the lines | |
168 | in the file and the line number indexes a line in that list. An IOError |
|
165 | in the file and the line number indexes a line in that list. An IOError | |
169 | is raised if the source code cannot be retrieved. |
|
166 | is raised if the source code cannot be retrieved. | |
170 |
|
167 | |||
171 | FIXED version with which we monkeypatch the stdlib to work around a bug.""" |
|
168 | FIXED version with which we monkeypatch the stdlib to work around a bug.""" | |
172 |
|
169 | |||
173 | file = getsourcefile(object) or getfile(object) |
|
170 | file = getsourcefile(object) or getfile(object) | |
174 | # If the object is a frame, then trying to get the globals dict from its |
|
171 | # If the object is a frame, then trying to get the globals dict from its | |
175 | # module won't work. Instead, the frame object itself has the globals |
|
172 | # module won't work. Instead, the frame object itself has the globals | |
176 | # dictionary. |
|
173 | # dictionary. | |
177 | globals_dict = None |
|
174 | globals_dict = None | |
178 | if inspect.isframe(object): |
|
175 | if inspect.isframe(object): | |
179 | # XXX: can this ever be false? |
|
176 | # XXX: can this ever be false? | |
180 | globals_dict = object.f_globals |
|
177 | globals_dict = object.f_globals | |
181 | else: |
|
178 | else: | |
182 | module = getmodule(object, file) |
|
179 | module = getmodule(object, file) | |
183 | if module: |
|
180 | if module: | |
184 | globals_dict = module.__dict__ |
|
181 | globals_dict = module.__dict__ | |
185 | lines = linecache.getlines(file, globals_dict) |
|
182 | lines = linecache.getlines(file, globals_dict) | |
186 | if not lines: |
|
183 | if not lines: | |
187 | raise IOError('could not get source code') |
|
184 | raise IOError('could not get source code') | |
188 |
|
185 | |||
189 | if ismodule(object): |
|
186 | if ismodule(object): | |
190 | return lines, 0 |
|
187 | return lines, 0 | |
191 |
|
188 | |||
192 | if isclass(object): |
|
189 | if isclass(object): | |
193 | name = object.__name__ |
|
190 | name = object.__name__ | |
194 | pat = re.compile(r'^(\s*)class\s*' + name + r'\b') |
|
191 | pat = re.compile(r'^(\s*)class\s*' + name + r'\b') | |
195 | # make some effort to find the best matching class definition: |
|
192 | # make some effort to find the best matching class definition: | |
196 | # use the one with the least indentation, which is the one |
|
193 | # use the one with the least indentation, which is the one | |
197 | # that's most probably not inside a function definition. |
|
194 | # that's most probably not inside a function definition. | |
198 | candidates = [] |
|
195 | candidates = [] | |
199 | for i, line in enumerate(lines): |
|
196 | for i, line in enumerate(lines): | |
200 | match = pat.match(line) |
|
197 | match = pat.match(line) | |
201 | if match: |
|
198 | if match: | |
202 | # if it's at toplevel, it's already the best one |
|
199 | # if it's at toplevel, it's already the best one | |
203 | if line[0] == 'c': |
|
200 | if line[0] == 'c': | |
204 | return lines, i |
|
201 | return lines, i | |
205 | # else add whitespace to candidate list |
|
202 | # else add whitespace to candidate list | |
206 | candidates.append((match.group(1), i)) |
|
203 | candidates.append((match.group(1), i)) | |
207 | if candidates: |
|
204 | if candidates: | |
208 | # this will sort by whitespace, and by line number, |
|
205 | # this will sort by whitespace, and by line number, | |
209 | # less whitespace first |
|
206 | # less whitespace first | |
210 | candidates.sort() |
|
207 | candidates.sort() | |
211 | return lines, candidates[0][1] |
|
208 | return lines, candidates[0][1] | |
212 | else: |
|
209 | else: | |
213 | raise IOError('could not find class definition') |
|
210 | raise IOError('could not find class definition') | |
214 |
|
211 | |||
215 | if ismethod(object): |
|
212 | if ismethod(object): | |
216 | object = object.__func__ |
|
213 | object = object.__func__ | |
217 | if isfunction(object): |
|
214 | if isfunction(object): | |
218 | object = object.__code__ |
|
215 | object = object.__code__ | |
219 | if istraceback(object): |
|
216 | if istraceback(object): | |
220 | object = object.tb_frame |
|
217 | object = object.tb_frame | |
221 | if isframe(object): |
|
218 | if isframe(object): | |
222 | object = object.f_code |
|
219 | object = object.f_code | |
223 | if iscode(object): |
|
220 | if iscode(object): | |
224 | if not hasattr(object, 'co_firstlineno'): |
|
221 | if not hasattr(object, 'co_firstlineno'): | |
225 | raise IOError('could not find function definition') |
|
222 | raise IOError('could not find function definition') | |
226 | pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)') |
|
223 | pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)') | |
227 | pmatch = pat.match |
|
224 | pmatch = pat.match | |
228 | # fperez - fix: sometimes, co_firstlineno can give a number larger than |
|
225 | # fperez - fix: sometimes, co_firstlineno can give a number larger than | |
229 | # the length of lines, which causes an error. Safeguard against that. |
|
226 | # the length of lines, which causes an error. Safeguard against that. | |
230 | lnum = min(object.co_firstlineno, len(lines)) - 1 |
|
227 | lnum = min(object.co_firstlineno, len(lines)) - 1 | |
231 | while lnum > 0: |
|
228 | while lnum > 0: | |
232 | if pmatch(lines[lnum]): |
|
229 | if pmatch(lines[lnum]): | |
233 | break |
|
230 | break | |
234 | lnum -= 1 |
|
231 | lnum -= 1 | |
235 |
|
232 | |||
236 | return lines, lnum |
|
233 | return lines, lnum | |
237 | raise IOError('could not find code object') |
|
234 | raise IOError('could not find code object') | |
238 |
|
235 | |||
239 |
|
236 | |||
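
To see what the patched ``findsource`` hands back, the sketch below points it at a stdlib class: it returns every line of the defining file together with the 0-based index of the best-matching definition line (``json.JSONDecoder`` is just a convenient, importable target)::

    import json
    from IPython.core.ultratb import findsource

    lines, lnum = findsource(json.JSONDecoder)
    print(lnum, lines[lnum].rstrip())   # index and text of the "class JSONDecoder" line
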
240 | # This is a patched version of inspect.getargs that applies the (unmerged) |
|
237 | # This is a patched version of inspect.getargs that applies the (unmerged) | |
241 | # patch for http://bugs.python.org/issue14611 by Stefano Taschini. This fixes |
|
238 | # patch for http://bugs.python.org/issue14611 by Stefano Taschini. This fixes | |
242 | # https://github.com/ipython/ipython/issues/8205 and |
|
239 | # https://github.com/ipython/ipython/issues/8205 and | |
243 | # https://github.com/ipython/ipython/issues/8293 |
|
240 | # https://github.com/ipython/ipython/issues/8293 | |
244 | def getargs(co): |
|
241 | def getargs(co): | |
245 | """Get information about the arguments accepted by a code object. |
|
242 | """Get information about the arguments accepted by a code object. | |
246 |
|
243 | |||
247 | Three things are returned: (args, varargs, varkw), where 'args' is |
|
244 | Three things are returned: (args, varargs, varkw), where 'args' is | |
248 | a list of argument names (possibly containing nested lists), and |
|
245 | a list of argument names (possibly containing nested lists), and | |
249 | 'varargs' and 'varkw' are the names of the * and ** arguments or None.""" |
|
246 | 'varargs' and 'varkw' are the names of the * and ** arguments or None.""" | |
250 | if not iscode(co): |
|
247 | if not iscode(co): | |
251 | raise TypeError('{!r} is not a code object'.format(co)) |
|
248 | raise TypeError('{!r} is not a code object'.format(co)) | |
252 |
|
249 | |||
253 | nargs = co.co_argcount |
|
250 | nargs = co.co_argcount | |
254 | names = co.co_varnames |
|
251 | names = co.co_varnames | |
255 | args = list(names[:nargs]) |
|
252 | args = list(names[:nargs]) | |
256 | step = 0 |
|
253 | step = 0 | |
257 |
|
254 | |||
258 | # The following acrobatics are for anonymous (tuple) arguments. |
|
255 | # The following acrobatics are for anonymous (tuple) arguments. | |
259 | for i in range(nargs): |
|
256 | for i in range(nargs): | |
260 | if args[i][:1] in ('', '.'): |
|
257 | if args[i][:1] in ('', '.'): | |
261 | stack, remain, count = [], [], [] |
|
258 | stack, remain, count = [], [], [] | |
262 | while step < len(co.co_code): |
|
259 | while step < len(co.co_code): | |
263 | op = ord(co.co_code[step]) |
|
260 | op = ord(co.co_code[step]) | |
264 | step = step + 1 |
|
261 | step = step + 1 | |
265 | if op >= dis.HAVE_ARGUMENT: |
|
262 | if op >= dis.HAVE_ARGUMENT: | |
266 | opname = dis.opname[op] |
|
263 | opname = dis.opname[op] | |
267 | value = ord(co.co_code[step]) + ord(co.co_code[step+1])*256 |
|
264 | value = ord(co.co_code[step]) + ord(co.co_code[step+1])*256 | |
268 | step = step + 2 |
|
265 | step = step + 2 | |
269 | if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'): |
|
266 | if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'): | |
270 | remain.append(value) |
|
267 | remain.append(value) | |
271 | count.append(value) |
|
268 | count.append(value) | |
272 | elif opname in ('STORE_FAST', 'STORE_DEREF'): |
|
269 | elif opname in ('STORE_FAST', 'STORE_DEREF'): | |
273 | if op in dis.haslocal: |
|
270 | if op in dis.haslocal: | |
274 | stack.append(co.co_varnames[value]) |
|
271 | stack.append(co.co_varnames[value]) | |
275 | elif op in dis.hasfree: |
|
272 | elif op in dis.hasfree: | |
276 | stack.append((co.co_cellvars + co.co_freevars)[value]) |
|
273 | stack.append((co.co_cellvars + co.co_freevars)[value]) | |
277 | # Special case for sublists of length 1: def foo((bar)) |
|
274 | # Special case for sublists of length 1: def foo((bar)) | |
278 | # doesn't generate the UNPACK_TUPLE bytecode, so if |
|
275 | # doesn't generate the UNPACK_TUPLE bytecode, so if | |
279 | # `remain` is empty here, we have such a sublist. |
|
276 | # `remain` is empty here, we have such a sublist. | |
280 | if not remain: |
|
277 | if not remain: | |
281 | stack[0] = [stack[0]] |
|
278 | stack[0] = [stack[0]] | |
282 | break |
|
279 | break | |
283 | else: |
|
280 | else: | |
284 | remain[-1] = remain[-1] - 1 |
|
281 | remain[-1] = remain[-1] - 1 | |
285 | while remain[-1] == 0: |
|
282 | while remain[-1] == 0: | |
286 | remain.pop() |
|
283 | remain.pop() | |
287 | size = count.pop() |
|
284 | size = count.pop() | |
288 | stack[-size:] = [stack[-size:]] |
|
285 | stack[-size:] = [stack[-size:]] | |
289 | if not remain: |
|
286 | if not remain: | |
290 | break |
|
287 | break | |
291 | remain[-1] = remain[-1] - 1 |
|
288 | remain[-1] = remain[-1] - 1 | |
292 | if not remain: |
|
289 | if not remain: | |
293 | break |
|
290 | break | |
294 | args[i] = stack[0] |
|
291 | args[i] = stack[0] | |
295 |
|
292 | |||
296 | varargs = None |
|
293 | varargs = None | |
297 | if co.co_flags & inspect.CO_VARARGS: |
|
294 | if co.co_flags & inspect.CO_VARARGS: | |
298 | varargs = co.co_varnames[nargs] |
|
295 | varargs = co.co_varnames[nargs] | |
299 | nargs = nargs + 1 |
|
296 | nargs = nargs + 1 | |
300 | varkw = None |
|
297 | varkw = None | |
301 | if co.co_flags & inspect.CO_VARKEYWORDS: |
|
298 | if co.co_flags & inspect.CO_VARKEYWORDS: | |
302 | varkw = co.co_varnames[nargs] |
|
299 | varkw = co.co_varnames[nargs] | |
303 | return inspect.Arguments(args, varargs, varkw) |
|
300 | return inspect.Arguments(args, varargs, varkw) | |
304 |
|
301 | |||
305 |
|
302 | |||
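
For ordinary functions the patched ``getargs`` behaves like the stdlib version; the bytecode walking above only matters for the long-gone tuple-argument syntax. A quick sketch with a throwaway ``sample`` function::

    from IPython.core.ultratb import getargs

    def sample(a, b, *rest, **options):
        pass

    print(getargs(sample.__code__))
    # Arguments(args=['a', 'b'], varargs='rest', varkw='options')
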
306 | # Monkeypatch inspect to apply our bugfix. |
|
303 | # Monkeypatch inspect to apply our bugfix. | |
307 | def with_patch_inspect(f): |
|
304 | def with_patch_inspect(f): | |
308 | """ |
|
305 | """ | |
309 | Deprecated since IPython 6.0 |
|
306 | Deprecated since IPython 6.0 | |
310 | decorator for monkeypatching inspect.findsource |
|
307 | decorator for monkeypatching inspect.findsource | |
311 | """ |
|
308 | """ | |
312 |
|
309 | |||
313 | def wrapped(*args, **kwargs): |
|
310 | def wrapped(*args, **kwargs): | |
314 | save_findsource = inspect.findsource |
|
311 | save_findsource = inspect.findsource | |
315 | save_getargs = inspect.getargs |
|
312 | save_getargs = inspect.getargs | |
316 | inspect.findsource = findsource |
|
313 | inspect.findsource = findsource | |
317 | inspect.getargs = getargs |
|
314 | inspect.getargs = getargs | |
318 | try: |
|
315 | try: | |
319 | return f(*args, **kwargs) |
|
316 | return f(*args, **kwargs) | |
320 | finally: |
|
317 | finally: | |
321 | inspect.findsource = save_findsource |
|
318 | inspect.findsource = save_findsource | |
322 | inspect.getargs = save_getargs |
|
319 | inspect.getargs = save_getargs | |
323 |
|
320 | |||
324 | return wrapped |
|
321 | return wrapped | |
325 |
|
322 | |||
326 |
|
323 | |||
327 | def fix_frame_records_filenames(records): |
|
324 | def fix_frame_records_filenames(records): | |
328 | """Try to fix the filenames in each record from inspect.getinnerframes(). |
|
325 | """Try to fix the filenames in each record from inspect.getinnerframes(). | |
329 |
|
326 | |||
330 | Particularly, modules loaded from within zip files have useless filenames |
|
327 | Particularly, modules loaded from within zip files have useless filenames | |
331 | attached to their code objects, and inspect.getinnerframes() just uses them. |
|
328 | attached to their code objects, and inspect.getinnerframes() just uses them. | |
332 | """ |
|
329 | """ | |
333 | fixed_records = [] |
|
330 | fixed_records = [] | |
334 | for frame, filename, line_no, func_name, lines, index in records: |
|
331 | for frame, filename, line_no, func_name, lines, index in records: | |
335 | # Look inside the frame's globals dictionary for __file__, |
|
332 | # Look inside the frame's globals dictionary for __file__, | |
336 | # which should be better. However, keep Cython filenames since |
|
333 | # which should be better. However, keep Cython filenames since | |
337 | # we prefer the source filenames over the compiled .so file. |
|
334 | # we prefer the source filenames over the compiled .so file. | |
338 | if not filename.endswith(('.pyx', '.pxd', '.pxi')): |
|
335 | if not filename.endswith(('.pyx', '.pxd', '.pxi')): | |
339 | better_fn = frame.f_globals.get('__file__', None) |
|
336 | better_fn = frame.f_globals.get('__file__', None) | |
340 | if isinstance(better_fn, str): |
|
337 | if isinstance(better_fn, str): | |
341 | # Check the type just in case someone did something weird with |
|
338 | # Check the type just in case someone did something weird with | |
342 | # __file__. It might also be None if the error occurred during |
|
339 | # __file__. It might also be None if the error occurred during | |
343 | # import. |
|
340 | # import. | |
344 | filename = better_fn |
|
341 | filename = better_fn | |
345 | fixed_records.append((frame, filename, line_no, func_name, lines, index)) |
|
342 | fixed_records.append((frame, filename, line_no, func_name, lines, index)) | |
346 | return fixed_records |
|
343 | return fixed_records | |
347 |
|
344 | |||
348 |
|
345 | |||
349 | @with_patch_inspect |
|
346 | @with_patch_inspect | |
350 | def _fixed_getinnerframes(etb, context=1, tb_offset=0): |
|
347 | def _fixed_getinnerframes(etb, context=1, tb_offset=0): | |
351 | LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5 |
|
348 | LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5 | |
352 |
|
349 | |||
353 | records = fix_frame_records_filenames(inspect.getinnerframes(etb, context)) |
|
350 | records = fix_frame_records_filenames(inspect.getinnerframes(etb, context)) | |
354 | # If the error is at the console, don't build any context, since it would |
|
351 | # If the error is at the console, don't build any context, since it would | |
355 | # otherwise produce 5 blank lines printed out (there is no file at the |
|
352 | # otherwise produce 5 blank lines printed out (there is no file at the | |
356 | # console) |
|
353 | # console) | |
357 | rec_check = records[tb_offset:] |
|
354 | rec_check = records[tb_offset:] | |
358 | try: |
|
355 | try: | |
359 | rname = rec_check[0][1] |
|
356 | rname = rec_check[0][1] | |
360 | if rname == '<ipython console>' or rname.endswith('<string>'): |
|
357 | if rname == '<ipython console>' or rname.endswith('<string>'): | |
361 | return rec_check |
|
358 | return rec_check | |
362 | except IndexError: |
|
359 | except IndexError: | |
363 | pass |
|
360 | pass | |
364 |
|
361 | |||
365 | aux = traceback.extract_tb(etb) |
|
362 | aux = traceback.extract_tb(etb) | |
366 | assert len(records) == len(aux) |
|
363 | assert len(records) == len(aux) | |
367 | for i, (file, lnum, _, _) in enumerate(aux): |
|
364 | for i, (file, lnum, _, _) in enumerate(aux): | |
368 | maybeStart = lnum - 1 - context // 2 |
|
365 | maybeStart = lnum - 1 - context // 2 | |
369 | start = max(maybeStart, 0) |
|
366 | start = max(maybeStart, 0) | |
370 | end = start + context |
|
367 | end = start + context | |
371 | lines = linecache.getlines(file)[start:end] |
|
368 | lines = linecache.getlines(file)[start:end] | |
372 | buf = list(records[i]) |
|
369 | buf = list(records[i]) | |
373 | buf[LNUM_POS] = lnum |
|
370 | buf[LNUM_POS] = lnum | |
374 | buf[INDEX_POS] = lnum - 1 - start |
|
371 | buf[INDEX_POS] = lnum - 1 - start | |
375 | buf[LINES_POS] = lines |
|
372 | buf[LINES_POS] = lines | |
376 | records[i] = tuple(buf) |
|
373 | records[i] = tuple(buf) | |
377 | return records[tb_offset:] |
|
374 | return records[tb_offset:] | |
378 |
|
375 | |||
379 | # Helper function -- largely belongs to VerboseTB, but we need the same |
|
376 | # Helper function -- largely belongs to VerboseTB, but we need the same | |
380 | # functionality to produce a pseudo verbose TB for SyntaxErrors, so that they |
|
377 | # functionality to produce a pseudo verbose TB for SyntaxErrors, so that they | |
381 | # can be recognized properly by ipython.el's py-traceback-line-re |
|
378 | # can be recognized properly by ipython.el's py-traceback-line-re | |
382 | # (SyntaxErrors have to be treated specially because they have no traceback) |
|
379 | # (SyntaxErrors have to be treated specially because they have no traceback) | |
383 |
|
380 | |||
384 |
|
381 | |||
385 | def _format_traceback_lines(lnum, index, lines, Colors, lvals, _line_format): |
|
382 | def _format_traceback_lines(lnum, index, lines, Colors, lvals, _line_format): | |
386 | """ |
|
383 | """ | |
387 | Format traceback lines with a pointing arrow and leading line numbers. |
|
384 | Format traceback lines with a pointing arrow and leading line numbers. | |
388 |
|
385 | |||
389 | Parameters |
|
386 | Parameters | |
390 | ========== |
|
387 | ========== | |
391 |
|
388 | |||
392 | lnum: int |
|
389 | lnum: int | |
393 | index: int |
|
390 | index: int | |
394 | lines: list[string] |
|
391 | lines: list[string] | |
395 | Colors: |
|
392 | Colors: | |
396 | ColorScheme used. |
|
393 | ColorScheme used. | |
397 | lvals: bytes |
|
394 | lvals: bytes | |
398 | Values of local variables, already colored, to inject just after the error line. |
|
395 | Values of local variables, already colored, to inject just after the error line. | |
399 | _line_format: f (str) -> (str, bool) |
|
396 | _line_format: f (str) -> (str, bool) | |
400 | returns (colorized version of str, whether colorizing failed) |
|
397 | returns (colorized version of str, whether colorizing failed) | |
401 | """ |
|
398 | """ | |
402 | numbers_width = INDENT_SIZE - 1 |
|
399 | numbers_width = INDENT_SIZE - 1 | |
403 | res = [] |
|
400 | res = [] | |
404 |
|
401 | |||
405 | for i,line in enumerate(lines, lnum-index): |
|
402 | for i,line in enumerate(lines, lnum-index): | |
406 | line = py3compat.cast_unicode(line) |
|
403 | line = py3compat.cast_unicode(line) | |
407 |
|
404 | |||
408 | new_line, err = _line_format(line, 'str') |
|
405 | new_line, err = _line_format(line, 'str') | |
409 | if not err: |
|
406 | if not err: | |
410 | line = new_line |
|
407 | line = new_line | |
411 |
|
408 | |||
412 | if i == lnum: |
|
409 | if i == lnum: | |
413 | # This is the line with the error |
|
410 | # This is the line with the error | |
414 | pad = numbers_width - len(str(i)) |
|
411 | pad = numbers_width - len(str(i)) | |
415 | num = '%s%s' % (debugger.make_arrow(pad), str(lnum)) |
|
412 | num = '%s%s' % (debugger.make_arrow(pad), str(lnum)) | |
416 | line = '%s%s%s %s%s' % (Colors.linenoEm, num, |
|
413 | line = '%s%s%s %s%s' % (Colors.linenoEm, num, | |
417 | Colors.line, line, Colors.Normal) |
|
414 | Colors.line, line, Colors.Normal) | |
418 | else: |
|
415 | else: | |
419 | num = '%*s' % (numbers_width, i) |
|
416 | num = '%*s' % (numbers_width, i) | |
420 | line = '%s%s%s %s' % (Colors.lineno, num, |
|
417 | line = '%s%s%s %s' % (Colors.lineno, num, | |
421 | Colors.Normal, line) |
|
418 | Colors.Normal, line) | |
422 |
|
419 | |||
423 | res.append(line) |
|
420 | res.append(line) | |
424 | if lvals and i == lnum: |
|
421 | if lvals and i == lnum: | |
425 | res.append(lvals + '\n') |
|
422 | res.append(lvals + '\n') | |
426 | return res |
|
423 | return res | |
427 |
|
424 | |||
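
A sketch of driving the formatter above by hand, using the NoColor scheme and a pass-through ``_line_format`` (returning a truthy error flag keeps each line unhighlighted); the three strings stand in for a real context window around source line 12::

    from IPython.core.excolors import exception_colors
    from IPython.core.ultratb import _format_traceback_lines

    color_table = exception_colors()
    color_table.set_active_scheme('NoColor')

    lines = ['x = 1\n', 'y = x / 0\n', 'print(y)\n']
    out = _format_traceback_lines(
        lnum=12, index=1, lines=lines,
        Colors=color_table.active_colors, lvals='',
        _line_format=lambda line, _: (line, True))
    print(''.join(out))   # the middle line carries the arrow marker
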
428 | def is_recursion_error(etype, value, records): |
|
425 | def is_recursion_error(etype, value, records): | |
429 | try: |
|
426 | try: | |
430 | # RecursionError is new in Python 3.5 |
|
427 | # RecursionError is new in Python 3.5 | |
431 | recursion_error_type = RecursionError |
|
428 | recursion_error_type = RecursionError | |
432 | except NameError: |
|
429 | except NameError: | |
433 | recursion_error_type = RuntimeError |
|
430 | recursion_error_type = RuntimeError | |
434 |
|
431 | |||
435 | # The default recursion limit is 1000, but some of that will be taken up |
|
432 | # The default recursion limit is 1000, but some of that will be taken up | |
436 | # by stack frames in IPython itself. >500 frames probably indicates |
|
433 | # by stack frames in IPython itself. >500 frames probably indicates | |
437 | # a recursion error. |
|
434 | # a recursion error. | |
438 | return (etype is recursion_error_type) \ |
|
435 | return (etype is recursion_error_type) \ | |
439 | and "recursion" in str(value).lower() \ |
|
436 | and "recursion" in str(value).lower() \ | |
440 | and len(records) > _FRAME_RECURSION_LIMIT |
|
437 | and len(records) > _FRAME_RECURSION_LIMIT | |
441 |
|
438 | |||
442 | def find_recursion(etype, value, records): |
|
439 | def find_recursion(etype, value, records): | |
443 | """Identify the repeating stack frames from a RecursionError traceback |
|
440 | """Identify the repeating stack frames from a RecursionError traceback | |
444 |
|
441 | |||
445 | 'records' is a list as returned by VerboseTB.get_records() |
|
442 | 'records' is a list as returned by VerboseTB.get_records() | |
446 |
|
443 | |||
447 | Returns (last_unique, repeat_length) |
|
444 | Returns (last_unique, repeat_length) | |
448 | """ |
|
445 | """ | |
449 | # This involves a bit of guesswork - we want to show enough of the traceback |
|
446 | # This involves a bit of guesswork - we want to show enough of the traceback | |
450 | # to indicate where the recursion is occurring. We guess that the innermost |
|
447 | # to indicate where the recursion is occurring. We guess that the innermost | |
451 | # quarter of the traceback (250 frames by default) is repeats, and find the |
|
448 | # quarter of the traceback (250 frames by default) is repeats, and find the | |
452 | # first frame (from in to out) that looks different. |
|
449 | # first frame (from in to out) that looks different. | |
453 | if not is_recursion_error(etype, value, records): |
|
450 | if not is_recursion_error(etype, value, records): | |
454 | return len(records), 0 |
|
451 | return len(records), 0 | |
455 |
|
452 | |||
456 | # Select filename, lineno, func_name to track frames with |
|
453 | # Select filename, lineno, func_name to track frames with | |
457 | records = [r[1:4] for r in records] |
|
454 | records = [r[1:4] for r in records] | |
458 | inner_frames = records[-(len(records)//4):] |
|
455 | inner_frames = records[-(len(records)//4):] | |
459 | frames_repeated = set(inner_frames) |
|
456 | frames_repeated = set(inner_frames) | |
460 |
|
457 | |||
461 | last_seen_at = {} |
|
458 | last_seen_at = {} | |
462 | longest_repeat = 0 |
|
459 | longest_repeat = 0 | |
463 | i = len(records) |
|
460 | i = len(records) | |
464 | for frame in reversed(records): |
|
461 | for frame in reversed(records): | |
465 | i -= 1 |
|
462 | i -= 1 | |
466 | if frame not in frames_repeated: |
|
463 | if frame not in frames_repeated: | |
467 | last_unique = i |
|
464 | last_unique = i | |
468 | break |
|
465 | break | |
469 |
|
466 | |||
470 | if frame in last_seen_at: |
|
467 | if frame in last_seen_at: | |
471 | distance = last_seen_at[frame] - i |
|
468 | distance = last_seen_at[frame] - i | |
472 | longest_repeat = max(longest_repeat, distance) |
|
469 | longest_repeat = max(longest_repeat, distance) | |
473 |
|
470 | |||
474 | last_seen_at[frame] = i |
|
471 | last_seen_at[frame] = i | |
475 | else: |
|
472 | else: | |
476 | last_unique = 0 # The whole traceback was recursion |
|
473 | last_unique = 0 # The whole traceback was recursion | |
477 |
|
474 | |||
478 | return last_unique, longest_repeat |
|
475 | return last_unique, longest_repeat | |
479 |
|
476 | |||
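
The heuristic above can be exercised with synthetic records (real ones come from ``VerboseTB.get_records()``): only slots 1-3 of each record are compared, and the scan engages only past ``_FRAME_RECURSION_LIMIT`` frames. The file names and function names below are invented::

    from IPython.core.ultratb import find_recursion

    # Ten ordinary frames, then 600 repeats of a two-frame cycle.
    records = [(None, 'setup.py', i, 'main', [], 0) for i in range(10)]
    records += [(None, 'loop.py', 3, 'ping', [], 0),
                (None, 'loop.py', 7, 'pong', [], 0)] * 600

    err = RecursionError('maximum recursion depth exceeded')
    print(find_recursion(RecursionError, err, records))   # (9, 2): last unique frame, cycle length
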
480 | #--------------------------------------------------------------------------- |
|
477 | #--------------------------------------------------------------------------- | |
481 | # Module classes |
|
478 | # Module classes | |
482 | class TBTools(colorable.Colorable): |
|
479 | class TBTools(colorable.Colorable): | |
483 | """Basic tools used by all traceback printer classes.""" |
|
480 | """Basic tools used by all traceback printer classes.""" | |
484 |
|
481 | |||
485 | # Number of frames to skip when reporting tracebacks |
|
482 | # Number of frames to skip when reporting tracebacks | |
486 | tb_offset = 0 |
|
483 | tb_offset = 0 | |
487 |
|
484 | |||
488 | def __init__(self, color_scheme='NoColor', call_pdb=False, ostream=None, parent=None, config=None): |
|
485 | def __init__(self, color_scheme='NoColor', call_pdb=False, ostream=None, parent=None, config=None): | |
489 | # Whether to call the interactive pdb debugger after printing |
|
486 | # Whether to call the interactive pdb debugger after printing | |
490 | # tracebacks or not |
|
487 | # tracebacks or not | |
491 | super(TBTools, self).__init__(parent=parent, config=config) |
|
488 | super(TBTools, self).__init__(parent=parent, config=config) | |
492 | self.call_pdb = call_pdb |
|
489 | self.call_pdb = call_pdb | |
493 |
|
490 | |||
494 | # Output stream to write to. Note that we store the original value in |
|
491 | # Output stream to write to. Note that we store the original value in | |
495 | # a private attribute and then make the public ostream a property, so |
|
492 | # a private attribute and then make the public ostream a property, so | |
496 | # that we can delay accessing sys.stdout until runtime. The way |
|
493 | # that we can delay accessing sys.stdout until runtime. The way | |
497 | # things are written now, the sys.stdout object is dynamically managed |
|
494 | # things are written now, the sys.stdout object is dynamically managed | |
498 | # so a reference to it should NEVER be stored statically. This |
|
495 | # so a reference to it should NEVER be stored statically. This | |
499 | # property approach confines this detail to a single location, and all |
|
496 | # property approach confines this detail to a single location, and all | |
500 | # subclasses can simply access self.ostream for writing. |
|
497 | # subclasses can simply access self.ostream for writing. | |
501 | self._ostream = ostream |
|
498 | self._ostream = ostream | |
502 |
|
499 | |||
503 | # Create color table |
|
500 | # Create color table | |
504 | self.color_scheme_table = exception_colors() |
|
501 | self.color_scheme_table = exception_colors() | |
505 |
|
502 | |||
506 | self.set_colors(color_scheme) |
|
503 | self.set_colors(color_scheme) | |
507 | self.old_scheme = color_scheme # save initial value for toggles |
|
504 | self.old_scheme = color_scheme # save initial value for toggles | |
508 |
|
505 | |||
509 | if call_pdb: |
|
506 | if call_pdb: | |
510 | self.pdb = debugger.Pdb() |
|
507 | self.pdb = debugger.Pdb() | |
511 | else: |
|
508 | else: | |
512 | self.pdb = None |
|
509 | self.pdb = None | |
513 |
|
510 | |||
514 | def _get_ostream(self): |
|
511 | def _get_ostream(self): | |
515 | """Output stream that exceptions are written to. |
|
512 | """Output stream that exceptions are written to. | |
516 |
|
513 | |||
517 | Valid values are: |
|
514 | Valid values are: | |
518 |
|
515 | |||
519 | - None: the default, which means that IPython will dynamically resolve |
|
516 | - None: the default, which means that IPython will dynamically resolve | |
520 | to sys.stdout. This ensures compatibility with most tools, including |
|
517 | to sys.stdout. This ensures compatibility with most tools, including | |
521 | Windows (where plain stdout doesn't recognize ANSI escapes). |
|
518 | Windows (where plain stdout doesn't recognize ANSI escapes). | |
522 |
|
519 | |||
523 | - Any object with 'write' and 'flush' attributes. |
|
520 | - Any object with 'write' and 'flush' attributes. | |
524 | """ |
|
521 | """ | |
525 | return sys.stdout if self._ostream is None else self._ostream |
|
522 | return sys.stdout if self._ostream is None else self._ostream | |
526 |
|
523 | |||
527 | def _set_ostream(self, val): |
|
524 | def _set_ostream(self, val): | |
528 | assert val is None or (hasattr(val, 'write') and hasattr(val, 'flush')) |
|
525 | assert val is None or (hasattr(val, 'write') and hasattr(val, 'flush')) | |
529 | self._ostream = val |
|
526 | self._ostream = val | |
530 |
|
527 | |||
531 | ostream = property(_get_ostream, _set_ostream) |
|
528 | ostream = property(_get_ostream, _set_ostream) | |
532 |
|
529 | |||
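# Illustrative sketch, independent of this module: the same "resolve
# sys.stdout at write time" property pattern the docstring above describes.
# Nothing below is IPython API; it only mirrors _get_ostream/_set_ostream.
import io
import sys

class LazyOutput:
    def __init__(self, ostream=None):
        self._ostream = ostream      # None means "whatever sys.stdout is *now*"

    @property
    def ostream(self):
        return sys.stdout if self._ostream is None else self._ostream

    @ostream.setter
    def ostream(self, val):
        assert val is None or (hasattr(val, 'write') and hasattr(val, 'flush'))
        self._ostream = val

writer = LazyOutput()
buf = io.StringIO()
writer.ostream = buf                 # explicit redirection is honoured...
writer.ostream.write("captured\n")
writer.ostream = None                # ...and None falls back to the live sys.stdout
writer.ostream.write("back on stdout\n")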
533 | def get_parts_of_chained_exception(self, evalue): |
|
530 | def get_parts_of_chained_exception(self, evalue): | |
534 | def get_chained_exception(exception_value): |
|
531 | def get_chained_exception(exception_value): | |
535 | cause = getattr(exception_value, '__cause__', None) |
|
532 | cause = getattr(exception_value, '__cause__', None) | |
536 | if cause: |
|
533 | if cause: | |
537 | return cause |
|
534 | return cause | |
538 | if getattr(exception_value, '__suppress_context__', False): |
|
535 | if getattr(exception_value, '__suppress_context__', False): | |
539 | return None |
|
536 | return None | |
540 | return getattr(exception_value, '__context__', None) |
|
537 | return getattr(exception_value, '__context__', None) | |
541 |
|
538 | |||
542 | chained_evalue = get_chained_exception(evalue) |
|
539 | chained_evalue = get_chained_exception(evalue) | |
543 |
|
540 | |||
544 | if chained_evalue: |
|
541 | if chained_evalue: | |
545 | return chained_evalue.__class__, chained_evalue, chained_evalue.__traceback__ |
|
542 | return chained_evalue.__class__, chained_evalue, chained_evalue.__traceback__ | |
546 |
|
543 | |||
547 | def prepare_chained_exception_message(self, cause): |
|
544 | def prepare_chained_exception_message(self, cause): | |
548 | direct_cause = "\nThe above exception was the direct cause of the following exception:\n" |
|
545 | direct_cause = "\nThe above exception was the direct cause of the following exception:\n" | |
549 | exception_during_handling = "\nDuring handling of the above exception, another exception occurred:\n" |
|
546 | exception_during_handling = "\nDuring handling of the above exception, another exception occurred:\n" | |
550 |
|
547 | |||
551 | if cause: |
|
548 | if cause: | |
552 | message = [[direct_cause]] |
|
549 | message = [[direct_cause]] | |
553 | else: |
|
550 | else: | |
554 | message = [[exception_during_handling]] |
|
551 | message = [[exception_during_handling]] | |
555 | return message |
|
552 | return message | |
556 |
|
553 | |||
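# Illustrative sketch of the plain-Python chaining attributes (PEP 3134) that
# get_chained_exception() above inspects; the chain_kind helper is made up
# here.  "raise X from Y" sets __cause__, an exception raised while handling
# another sets __context__, and "raise X from None" sets __suppress_context__.
def chain_kind(exc):
    if getattr(exc, '__cause__', None):
        return 'direct cause'                 # -> "direct cause of the following"
    if getattr(exc, '__suppress_context__', False):
        return 'suppressed'                   # chained exception is hidden
    if getattr(exc, '__context__', None):
        return 'during handling'              # -> "another exception occurred"
    return 'no chaining'

try:
    try:
        raise KeyError('missing')
    except KeyError as err:
        raise RuntimeError('lookup failed') from err    # explicit chaining
except RuntimeError as exc:
    assert chain_kind(exc) == 'direct cause'

try:
    try:
        raise KeyError('missing')
    except KeyError:
        raise RuntimeError('lookup failed')             # implicit chaining
except RuntimeError as exc:
    assert chain_kind(exc) == 'during handling'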
557 | def set_colors(self, *args, **kw): |
|
554 | def set_colors(self, *args, **kw): | |
558 | """Shorthand access to the color table scheme selector method.""" |
|
555 | """Shorthand access to the color table scheme selector method.""" | |
559 |
|
556 | |||
560 | # Set own color table |
|
557 | # Set own color table | |
561 | self.color_scheme_table.set_active_scheme(*args, **kw) |
|
558 | self.color_scheme_table.set_active_scheme(*args, **kw) | |
562 | # for convenience, set Colors to the active scheme |
|
559 | # for convenience, set Colors to the active scheme | |
563 | self.Colors = self.color_scheme_table.active_colors |
|
560 | self.Colors = self.color_scheme_table.active_colors | |
564 | # Also set colors of debugger |
|
561 | # Also set colors of debugger | |
565 | if hasattr(self, 'pdb') and self.pdb is not None: |
|
562 | if hasattr(self, 'pdb') and self.pdb is not None: | |
566 | self.pdb.set_colors(*args, **kw) |
|
563 | self.pdb.set_colors(*args, **kw) | |
567 |
|
564 | |||
568 | def color_toggle(self): |
|
565 | def color_toggle(self): | |
569 | """Toggle between the currently active color scheme and NoColor.""" |
|
566 | """Toggle between the currently active color scheme and NoColor.""" | |
570 |
|
567 | |||
571 | if self.color_scheme_table.active_scheme_name == 'NoColor': |
|
568 | if self.color_scheme_table.active_scheme_name == 'NoColor': | |
572 | self.color_scheme_table.set_active_scheme(self.old_scheme) |
|
569 | self.color_scheme_table.set_active_scheme(self.old_scheme) | |
573 | self.Colors = self.color_scheme_table.active_colors |
|
570 | self.Colors = self.color_scheme_table.active_colors | |
574 | else: |
|
571 | else: | |
575 | self.old_scheme = self.color_scheme_table.active_scheme_name |
|
572 | self.old_scheme = self.color_scheme_table.active_scheme_name | |
576 | self.color_scheme_table.set_active_scheme('NoColor') |
|
573 | self.color_scheme_table.set_active_scheme('NoColor') | |
577 | self.Colors = self.color_scheme_table.active_colors |
|
574 | self.Colors = self.color_scheme_table.active_colors | |
578 |
|
575 | |||
579 | def stb2text(self, stb): |
|
576 | def stb2text(self, stb): | |
580 | """Convert a structured traceback (a list) to a string.""" |
|
577 | """Convert a structured traceback (a list) to a string.""" | |
581 | return '\n'.join(stb) |
|
578 | return '\n'.join(stb) | |
582 |
|
579 | |||
583 | def text(self, etype, value, tb, tb_offset=None, context=5): |
|
580 | def text(self, etype, value, tb, tb_offset=None, context=5): | |
584 | """Return formatted traceback. |
|
581 | """Return formatted traceback. | |
585 |
|
582 | |||
586 | Subclasses may override this if they add extra arguments. |
|
583 | Subclasses may override this if they add extra arguments. | |
587 | """ |
|
584 | """ | |
588 | tb_list = self.structured_traceback(etype, value, tb, |
|
585 | tb_list = self.structured_traceback(etype, value, tb, | |
589 | tb_offset, context) |
|
586 | tb_offset, context) | |
590 | return self.stb2text(tb_list) |
|
587 | return self.stb2text(tb_list) | |
591 |
|
588 | |||
592 | def structured_traceback(self, etype, evalue, tb, tb_offset=None, |
|
589 | def structured_traceback(self, etype, evalue, tb, tb_offset=None, | |
593 | context=5, mode=None): |
|
590 | context=5, mode=None): | |
594 | """Return a list of traceback frames. |
|
591 | """Return a list of traceback frames. | |
595 |
|
592 | |||
596 | Must be implemented by each class. |
|
593 | Must be implemented by each class. | |
597 | """ |
|
594 | """ | |
598 | raise NotImplementedError() |
|
595 | raise NotImplementedError() | |
599 |
|
596 | |||
600 |
|
597 | |||
601 | #--------------------------------------------------------------------------- |
|
598 | #--------------------------------------------------------------------------- | |
602 | class ListTB(TBTools): |
|
599 | class ListTB(TBTools): | |
603 | """Print traceback information from a traceback list, with optional color. |
|
600 | """Print traceback information from a traceback list, with optional color. | |
604 |
|
601 | |||
605 | Calling requires 3 arguments: (etype, evalue, elist) |
|
602 | Calling requires 3 arguments: (etype, evalue, elist) | |
606 | as would be obtained by:: |
|
603 | as would be obtained by:: | |
607 |
|
604 | |||
608 | etype, evalue, tb = sys.exc_info() |
|
605 | etype, evalue, tb = sys.exc_info() | |
609 | if tb: |
|
606 | if tb: | |
610 | elist = traceback.extract_tb(tb) |
|
607 | elist = traceback.extract_tb(tb) | |
611 | else: |
|
608 | else: | |
612 | elist = None |
|
609 | elist = None | |
613 |
|
610 | |||
614 | It can thus be used by programs which need to process the traceback before |
|
611 | It can thus be used by programs which need to process the traceback before | |
615 | printing (such as console replacements based on the code module from the |
|
612 | printing (such as console replacements based on the code module from the | |
616 | standard library). |
|
613 | standard library). | |
617 |
|
614 | |||
618 | Because they are meant to be called without a full traceback (only a |
|
615 | Because they are meant to be called without a full traceback (only a | |
619 | list), instances of this class can't call the interactive pdb debugger.""" |
|
616 | list), instances of this class can't call the interactive pdb debugger.""" | |
620 |
|
617 | |||
621 | def __init__(self, color_scheme='NoColor', call_pdb=False, ostream=None, parent=None, config=None): |
|
618 | def __init__(self, color_scheme='NoColor', call_pdb=False, ostream=None, parent=None, config=None): | |
622 | TBTools.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb, |
|
619 | TBTools.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb, | |
623 | ostream=ostream, parent=parent,config=config) |
|
620 | ostream=ostream, parent=parent,config=config) | |
624 |
|
621 | |||
625 | def __call__(self, etype, value, elist): |
|
622 | def __call__(self, etype, value, elist): | |
626 | self.ostream.flush() |
|
623 | self.ostream.flush() | |
627 | self.ostream.write(self.text(etype, value, elist)) |
|
624 | self.ostream.write(self.text(etype, value, elist)) | |
628 | self.ostream.write('\n') |
|
625 | self.ostream.write('\n') | |
629 |
|
626 | |||
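# Illustrative usage sketch for ListTB, following the (etype, evalue, elist)
# calling convention from the class docstring.  The import path is assumed to
# be IPython.core.ultratb; output goes to sys.stdout via the ostream property.
import sys
import traceback
from IPython.core.ultratb import ListTB    # assumed import path

handler = ListTB(color_scheme='NoColor')
try:
    1 / 0
except ZeroDivisionError:
    etype, evalue, tb = sys.exc_info()
    handler(etype, evalue, traceback.extract_tb(tb))   # prints a plain traceback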
630 | def _extract_tb(self, tb): |
|
627 | def _extract_tb(self, tb): | |
631 | if tb: |
|
628 | if tb: | |
632 | return traceback.extract_tb(tb) |
|
629 | return traceback.extract_tb(tb) | |
633 | else: |
|
630 | else: | |
634 | return None |
|
631 | return None | |
635 |
|
632 | |||
636 | def structured_traceback(self, etype, evalue, etb=None, tb_offset=None, |
|
633 | def structured_traceback(self, etype, evalue, etb=None, tb_offset=None, | |
637 | context=5): |
|
634 | context=5): | |
638 | """Return a color formatted string with the traceback info. |
|
635 | """Return a color formatted string with the traceback info. | |
639 |
|
636 | |||
640 | Parameters |
|
637 | Parameters | |
641 | ---------- |
|
638 | ---------- | |
642 | etype : exception type |
|
639 | etype : exception type | |
643 | Type of the exception raised. |
|
640 | Type of the exception raised. | |
644 |
|
641 | |||
645 | evalue : object |
|
642 | evalue : object | |
646 | Data stored in the exception |
|
643 | Data stored in the exception | |
647 |
|
644 | |||
648 | etb : object |
|
645 | etb : object | |
649 | If list: List of frames, see class docstring for details. |
|
646 | If list: List of frames, see class docstring for details. | |
650 | If Traceback: Traceback of the exception. |
|
647 | If Traceback: Traceback of the exception. | |
651 |
|
648 | |||
652 | tb_offset : int, optional |
|
649 | tb_offset : int, optional | |
653 | Number of frames in the traceback to skip. If not given, the |
|
650 | Number of frames in the traceback to skip. If not given, the | |
654 |           instance's tb_offset is used (set in constructor). |
|
651 |           instance's tb_offset is used (set in constructor). | |
655 |
|
652 | |||
656 | context : int, optional |
|
653 | context : int, optional | |
657 | Number of lines of context information to print. |
|
654 | Number of lines of context information to print. | |
658 |
|
655 | |||
659 | Returns |
|
656 | Returns | |
660 | ------- |
|
657 | ------- | |
661 | String with formatted exception. |
|
658 | String with formatted exception. | |
662 | """ |
|
659 | """ | |
663 | # This is a workaround to get chained_exc_ids in recursive calls |
|
660 | # This is a workaround to get chained_exc_ids in recursive calls | |
664 | # etb should not be a tuple if structured_traceback is not recursive |
|
661 | # etb should not be a tuple if structured_traceback is not recursive | |
665 | if isinstance(etb, tuple): |
|
662 | if isinstance(etb, tuple): | |
666 | etb, chained_exc_ids = etb |
|
663 | etb, chained_exc_ids = etb | |
667 | else: |
|
664 | else: | |
668 | chained_exc_ids = set() |
|
665 | chained_exc_ids = set() | |
669 |
|
666 | |||
670 | if isinstance(etb, list): |
|
667 | if isinstance(etb, list): | |
671 | elist = etb |
|
668 | elist = etb | |
672 | elif etb is not None: |
|
669 | elif etb is not None: | |
673 | elist = self._extract_tb(etb) |
|
670 | elist = self._extract_tb(etb) | |
674 | else: |
|
671 | else: | |
675 | elist = [] |
|
672 | elist = [] | |
676 | tb_offset = self.tb_offset if tb_offset is None else tb_offset |
|
673 | tb_offset = self.tb_offset if tb_offset is None else tb_offset | |
677 | Colors = self.Colors |
|
674 | Colors = self.Colors | |
678 | out_list = [] |
|
675 | out_list = [] | |
679 | if elist: |
|
676 | if elist: | |
680 |
|
677 | |||
681 | if tb_offset and len(elist) > tb_offset: |
|
678 | if tb_offset and len(elist) > tb_offset: | |
682 | elist = elist[tb_offset:] |
|
679 | elist = elist[tb_offset:] | |
683 |
|
680 | |||
684 | out_list.append('Traceback %s(most recent call last)%s:' % |
|
681 | out_list.append('Traceback %s(most recent call last)%s:' % | |
685 | (Colors.normalEm, Colors.Normal) + '\n') |
|
682 | (Colors.normalEm, Colors.Normal) + '\n') | |
686 | out_list.extend(self._format_list(elist)) |
|
683 | out_list.extend(self._format_list(elist)) | |
687 | # The exception info should be a single entry in the list. |
|
684 | # The exception info should be a single entry in the list. | |
688 | lines = ''.join(self._format_exception_only(etype, evalue)) |
|
685 | lines = ''.join(self._format_exception_only(etype, evalue)) | |
689 | out_list.append(lines) |
|
686 | out_list.append(lines) | |
690 |
|
687 | |||
691 | exception = self.get_parts_of_chained_exception(evalue) |
|
688 | exception = self.get_parts_of_chained_exception(evalue) | |
692 |
|
689 | |||
693 | if exception and not id(exception[1]) in chained_exc_ids: |
|
690 | if exception and not id(exception[1]) in chained_exc_ids: | |
694 | chained_exception_message = self.prepare_chained_exception_message( |
|
691 | chained_exception_message = self.prepare_chained_exception_message( | |
695 | evalue.__cause__)[0] |
|
692 | evalue.__cause__)[0] | |
696 | etype, evalue, etb = exception |
|
693 | etype, evalue, etb = exception | |
697 | # Trace exception to avoid infinite 'cause' loop |
|
694 | # Trace exception to avoid infinite 'cause' loop | |
698 | chained_exc_ids.add(id(exception[1])) |
|
695 | chained_exc_ids.add(id(exception[1])) | |
699 | chained_exceptions_tb_offset = 0 |
|
696 | chained_exceptions_tb_offset = 0 | |
700 | out_list = ( |
|
697 | out_list = ( | |
701 | self.structured_traceback( |
|
698 | self.structured_traceback( | |
702 | etype, evalue, (etb, chained_exc_ids), |
|
699 | etype, evalue, (etb, chained_exc_ids), | |
703 | chained_exceptions_tb_offset, context) |
|
700 | chained_exceptions_tb_offset, context) | |
704 | + chained_exception_message |
|
701 | + chained_exception_message | |
705 | + out_list) |
|
702 | + out_list) | |
706 |
|
703 | |||
707 | return out_list |
|
704 | return out_list | |
708 |
|
705 | |||
709 | def _format_list(self, extracted_list): |
|
706 | def _format_list(self, extracted_list): | |
710 | """Format a list of traceback entry tuples for printing. |
|
707 | """Format a list of traceback entry tuples for printing. | |
711 |
|
708 | |||
712 | Given a list of tuples as returned by extract_tb() or |
|
709 | Given a list of tuples as returned by extract_tb() or | |
713 | extract_stack(), return a list of strings ready for printing. |
|
710 | extract_stack(), return a list of strings ready for printing. | |
714 | Each string in the resulting list corresponds to the item with the |
|
711 | Each string in the resulting list corresponds to the item with the | |
715 | same index in the argument list. Each string ends in a newline; |
|
712 | same index in the argument list. Each string ends in a newline; | |
716 | the strings may contain internal newlines as well, for those items |
|
713 | the strings may contain internal newlines as well, for those items | |
717 | whose source text line is not None. |
|
714 | whose source text line is not None. | |
718 |
|
715 | |||
719 | Lifted almost verbatim from traceback.py |
|
716 | Lifted almost verbatim from traceback.py | |
720 | """ |
|
717 | """ | |
721 |
|
718 | |||
722 | Colors = self.Colors |
|
719 | Colors = self.Colors | |
723 | list = [] |
|
720 | list = [] | |
724 | for filename, lineno, name, line in extracted_list[:-1]: |
|
721 | for filename, lineno, name, line in extracted_list[:-1]: | |
725 | item = ' File %s"%s"%s, line %s%d%s, in %s%s%s\n' % \ |
|
722 | item = ' File %s"%s"%s, line %s%d%s, in %s%s%s\n' % \ | |
726 | (Colors.filename, filename, Colors.Normal, |
|
723 | (Colors.filename, filename, Colors.Normal, | |
727 | Colors.lineno, lineno, Colors.Normal, |
|
724 | Colors.lineno, lineno, Colors.Normal, | |
728 | Colors.name, name, Colors.Normal) |
|
725 | Colors.name, name, Colors.Normal) | |
729 | if line: |
|
726 | if line: | |
730 | item += ' %s\n' % line.strip() |
|
727 | item += ' %s\n' % line.strip() | |
731 | list.append(item) |
|
728 | list.append(item) | |
732 | # Emphasize the last entry |
|
729 | # Emphasize the last entry | |
733 | filename, lineno, name, line = extracted_list[-1] |
|
730 | filename, lineno, name, line = extracted_list[-1] | |
734 | item = '%s File %s"%s"%s, line %s%d%s, in %s%s%s%s\n' % \ |
|
731 | item = '%s File %s"%s"%s, line %s%d%s, in %s%s%s%s\n' % \ | |
735 | (Colors.normalEm, |
|
732 | (Colors.normalEm, | |
736 | Colors.filenameEm, filename, Colors.normalEm, |
|
733 | Colors.filenameEm, filename, Colors.normalEm, | |
737 | Colors.linenoEm, lineno, Colors.normalEm, |
|
734 | Colors.linenoEm, lineno, Colors.normalEm, | |
738 | Colors.nameEm, name, Colors.normalEm, |
|
735 | Colors.nameEm, name, Colors.normalEm, | |
739 | Colors.Normal) |
|
736 | Colors.Normal) | |
740 | if line: |
|
737 | if line: | |
741 | item += '%s %s%s\n' % (Colors.line, line.strip(), |
|
738 | item += '%s %s%s\n' % (Colors.line, line.strip(), | |
742 | Colors.Normal) |
|
739 | Colors.Normal) | |
743 | list.append(item) |
|
740 | list.append(item) | |
744 | return list |
|
741 | return list | |
745 |
|
742 | |||
746 | def _format_exception_only(self, etype, value): |
|
743 | def _format_exception_only(self, etype, value): | |
747 | """Format the exception part of a traceback. |
|
744 | """Format the exception part of a traceback. | |
748 |
|
745 | |||
749 | The arguments are the exception type and value such as given by |
|
746 | The arguments are the exception type and value such as given by | |
750 | sys.exc_info()[:2]. The return value is a list of strings, each ending |
|
747 | sys.exc_info()[:2]. The return value is a list of strings, each ending | |
751 | in a newline. Normally, the list contains a single string; however, |
|
748 | in a newline. Normally, the list contains a single string; however, | |
752 | for SyntaxError exceptions, it contains several lines that (when |
|
749 | for SyntaxError exceptions, it contains several lines that (when | |
753 | printed) display detailed information about where the syntax error |
|
750 | printed) display detailed information about where the syntax error | |
754 |         occurred. The message indicating which exception occurred is |
|
751 |         occurred. The message indicating which exception occurred is | |
755 |         always the last string in the list. |
|
752 |         always the last string in the list. | |
756 |
|
753 | |||
757 | Also lifted nearly verbatim from traceback.py |
|
754 | Also lifted nearly verbatim from traceback.py | |
758 | """ |
|
755 | """ | |
759 | have_filedata = False |
|
756 | have_filedata = False | |
760 | Colors = self.Colors |
|
757 | Colors = self.Colors | |
761 | list = [] |
|
758 | list = [] | |
762 | stype = py3compat.cast_unicode(Colors.excName + etype.__name__ + Colors.Normal) |
|
759 | stype = py3compat.cast_unicode(Colors.excName + etype.__name__ + Colors.Normal) | |
763 | if value is None: |
|
760 | if value is None: | |
764 | # Not sure if this can still happen in Python 2.6 and above |
|
761 | # Not sure if this can still happen in Python 2.6 and above | |
765 | list.append(stype + '\n') |
|
762 | list.append(stype + '\n') | |
766 | else: |
|
763 | else: | |
767 | if issubclass(etype, SyntaxError): |
|
764 | if issubclass(etype, SyntaxError): | |
768 | have_filedata = True |
|
765 | have_filedata = True | |
769 | if not value.filename: value.filename = "<string>" |
|
766 | if not value.filename: value.filename = "<string>" | |
770 | if value.lineno: |
|
767 | if value.lineno: | |
771 | lineno = value.lineno |
|
768 | lineno = value.lineno | |
772 | textline = linecache.getline(value.filename, value.lineno) |
|
769 | textline = linecache.getline(value.filename, value.lineno) | |
773 | else: |
|
770 | else: | |
774 | lineno = 'unknown' |
|
771 | lineno = 'unknown' | |
775 | textline = '' |
|
772 | textline = '' | |
776 | list.append('%s File %s"%s"%s, line %s%s%s\n' % \ |
|
773 | list.append('%s File %s"%s"%s, line %s%s%s\n' % \ | |
777 | (Colors.normalEm, |
|
774 | (Colors.normalEm, | |
778 | Colors.filenameEm, py3compat.cast_unicode(value.filename), Colors.normalEm, |
|
775 | Colors.filenameEm, py3compat.cast_unicode(value.filename), Colors.normalEm, | |
779 | Colors.linenoEm, lineno, Colors.Normal )) |
|
776 | Colors.linenoEm, lineno, Colors.Normal )) | |
780 | if textline == '': |
|
777 | if textline == '': | |
781 | textline = py3compat.cast_unicode(value.text, "utf-8") |
|
778 | textline = py3compat.cast_unicode(value.text, "utf-8") | |
782 |
|
779 | |||
783 | if textline is not None: |
|
780 | if textline is not None: | |
784 | i = 0 |
|
781 | i = 0 | |
785 | while i < len(textline) and textline[i].isspace(): |
|
782 | while i < len(textline) and textline[i].isspace(): | |
786 | i += 1 |
|
783 | i += 1 | |
787 | list.append('%s %s%s\n' % (Colors.line, |
|
784 | list.append('%s %s%s\n' % (Colors.line, | |
788 | textline.strip(), |
|
785 | textline.strip(), | |
789 | Colors.Normal)) |
|
786 | Colors.Normal)) | |
790 | if value.offset is not None: |
|
787 | if value.offset is not None: | |
791 | s = ' ' |
|
788 | s = ' ' | |
792 | for c in textline[i:value.offset - 1]: |
|
789 | for c in textline[i:value.offset - 1]: | |
793 | if c.isspace(): |
|
790 | if c.isspace(): | |
794 | s += c |
|
791 | s += c | |
795 | else: |
|
792 | else: | |
796 | s += ' ' |
|
793 | s += ' ' | |
797 | list.append('%s%s^%s\n' % (Colors.caret, s, |
|
794 | list.append('%s%s^%s\n' % (Colors.caret, s, | |
798 | Colors.Normal)) |
|
795 | Colors.Normal)) | |
799 |
|
796 | |||
800 | try: |
|
797 | try: | |
801 | s = value.msg |
|
798 | s = value.msg | |
802 | except Exception: |
|
799 | except Exception: | |
803 | s = self._some_str(value) |
|
800 | s = self._some_str(value) | |
804 | if s: |
|
801 | if s: | |
805 | list.append('%s%s:%s %s\n' % (stype, Colors.excName, |
|
802 | list.append('%s%s:%s %s\n' % (stype, Colors.excName, | |
806 | Colors.Normal, s)) |
|
803 | Colors.Normal, s)) | |
807 | else: |
|
804 | else: | |
808 | list.append('%s\n' % stype) |
|
805 | list.append('%s\n' % stype) | |
809 |
|
806 | |||
810 | # sync with user hooks |
|
807 | # sync with user hooks | |
811 | if have_filedata: |
|
808 | if have_filedata: | |
812 | ipinst = get_ipython() |
|
809 | ipinst = get_ipython() | |
813 | if ipinst is not None: |
|
810 | if ipinst is not None: | |
814 | ipinst.hooks.synchronize_with_editor(value.filename, value.lineno, 0) |
|
811 | ipinst.hooks.synchronize_with_editor(value.filename, value.lineno, 0) | |
815 |
|
812 | |||
816 | return list |
|
813 | return list | |
817 |
|
814 | |||
818 | def get_exception_only(self, etype, value): |
|
815 | def get_exception_only(self, etype, value): | |
819 | """Only print the exception type and message, without a traceback. |
|
816 | """Only print the exception type and message, without a traceback. | |
820 |
|
817 | |||
821 | Parameters |
|
818 | Parameters | |
822 | ---------- |
|
819 | ---------- | |
823 | etype : exception type |
|
820 | etype : exception type | |
824 | value : exception value |
|
821 | value : exception value | |
825 | """ |
|
822 | """ | |
826 | return ListTB.structured_traceback(self, etype, value) |
|
823 | return ListTB.structured_traceback(self, etype, value) | |
827 |
|
824 | |||
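# Illustrative sketch: get_exception_only() above is structured_traceback()
# without a frame list, so the result is just the "ExceptionType: message"
# block as a list of newline-terminated strings.  Import path assumed.
from IPython.core.ultratb import ListTB

handler = ListTB(color_scheme='NoColor')
parts = handler.get_exception_only(ValueError, ValueError('bad input'))
print(''.join(parts), end='')    # -> ValueError: bad input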
828 | def show_exception_only(self, etype, evalue): |
|
825 | def show_exception_only(self, etype, evalue): | |
829 | """Only print the exception type and message, without a traceback. |
|
826 | """Only print the exception type and message, without a traceback. | |
830 |
|
827 | |||
831 | Parameters |
|
828 | Parameters | |
832 | ---------- |
|
829 | ---------- | |
833 | etype : exception type |
|
830 | etype : exception type | |
834 |         evalue : exception value |
|
831 |         evalue : exception value | |
835 | """ |
|
832 | """ | |
836 | # This method needs to use __call__ from *this* class, not the one from |
|
833 | # This method needs to use __call__ from *this* class, not the one from | |
837 | # a subclass whose signature or behavior may be different |
|
834 | # a subclass whose signature or behavior may be different | |
838 | ostream = self.ostream |
|
835 | ostream = self.ostream | |
839 | ostream.flush() |
|
836 | ostream.flush() | |
840 | ostream.write('\n'.join(self.get_exception_only(etype, evalue))) |
|
837 | ostream.write('\n'.join(self.get_exception_only(etype, evalue))) | |
841 | ostream.flush() |
|
838 | ostream.flush() | |
842 |
|
839 | |||
843 | def _some_str(self, value): |
|
840 | def _some_str(self, value): | |
844 | # Lifted from traceback.py |
|
841 | # Lifted from traceback.py | |
845 | try: |
|
842 | try: | |
846 | return py3compat.cast_unicode(str(value)) |
|
843 | return py3compat.cast_unicode(str(value)) | |
847 | except: |
|
844 | except: | |
848 | return u'<unprintable %s object>' % type(value).__name__ |
|
845 | return u'<unprintable %s object>' % type(value).__name__ | |
849 |
|
846 | |||
850 |
|
847 | |||
851 | #---------------------------------------------------------------------------- |
|
848 | #---------------------------------------------------------------------------- | |
852 | class VerboseTB(TBTools): |
|
849 | class VerboseTB(TBTools): | |
853 | """A port of Ka-Ping Yee's cgitb.py module that outputs color text instead |
|
850 | """A port of Ka-Ping Yee's cgitb.py module that outputs color text instead | |
854 | of HTML. Requires inspect and pydoc. Crazy, man. |
|
851 | of HTML. Requires inspect and pydoc. Crazy, man. | |
855 |
|
852 | |||
856 | Modified version which optionally strips the topmost entries from the |
|
853 | Modified version which optionally strips the topmost entries from the | |
857 | traceback, to be used with alternate interpreters (because their own code |
|
854 | traceback, to be used with alternate interpreters (because their own code | |
858 | would appear in the traceback).""" |
|
855 | would appear in the traceback).""" | |
859 |
|
856 | |||
860 | def __init__(self, color_scheme='Linux', call_pdb=False, ostream=None, |
|
857 | def __init__(self, color_scheme='Linux', call_pdb=False, ostream=None, | |
861 | tb_offset=0, long_header=False, include_vars=True, |
|
858 | tb_offset=0, long_header=False, include_vars=True, | |
862 | check_cache=None, debugger_cls = None, |
|
859 | check_cache=None, debugger_cls = None, | |
863 | parent=None, config=None): |
|
860 | parent=None, config=None): | |
864 | """Specify traceback offset, headers and color scheme. |
|
861 | """Specify traceback offset, headers and color scheme. | |
865 |
|
862 | |||
866 | Define how many frames to drop from the tracebacks. Calling it with |
|
863 | Define how many frames to drop from the tracebacks. Calling it with | |
867 | tb_offset=1 allows use of this handler in interpreters which will have |
|
864 | tb_offset=1 allows use of this handler in interpreters which will have | |
868 | their own code at the top of the traceback (VerboseTB will first |
|
865 | their own code at the top of the traceback (VerboseTB will first | |
869 | remove that frame before printing the traceback info).""" |
|
866 | remove that frame before printing the traceback info).""" | |
870 | TBTools.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb, |
|
867 | TBTools.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb, | |
871 | ostream=ostream, parent=parent, config=config) |
|
868 | ostream=ostream, parent=parent, config=config) | |
872 | self.tb_offset = tb_offset |
|
869 | self.tb_offset = tb_offset | |
873 | self.long_header = long_header |
|
870 | self.long_header = long_header | |
874 | self.include_vars = include_vars |
|
871 | self.include_vars = include_vars | |
875 | # By default we use linecache.checkcache, but the user can provide a |
|
872 | # By default we use linecache.checkcache, but the user can provide a | |
876 | # different check_cache implementation. This is used by the IPython |
|
873 | # different check_cache implementation. This is used by the IPython | |
877 | # kernel to provide tracebacks for interactive code that is cached, |
|
874 | # kernel to provide tracebacks for interactive code that is cached, | |
878 | # by a compiler instance that flushes the linecache but preserves its |
|
875 | # by a compiler instance that flushes the linecache but preserves its | |
879 | # own code cache. |
|
876 | # own code cache. | |
880 | if check_cache is None: |
|
877 | if check_cache is None: | |
881 | check_cache = linecache.checkcache |
|
878 | check_cache = linecache.checkcache | |
882 | self.check_cache = check_cache |
|
879 | self.check_cache = check_cache | |
883 |
|
880 | |||
884 | self.debugger_cls = debugger_cls or debugger.Pdb |
|
881 | self.debugger_cls = debugger_cls or debugger.Pdb | |
885 |
|
882 | |||
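# Illustrative usage sketch for the tb_offset behaviour documented above: a
# small wrapper "interpreter" hides its own frame with tb_offset=1 so only the
# user's code appears in the report.  The import path and the use of exec()'d
# strings here are assumptions, not part of the reviewed module.
import sys
from IPython.core.ultratb import VerboseTB    # assumed import path

def run(user_code):
    try:
        exec(user_code, {})
    except Exception:
        vtb = VerboseTB(color_scheme='NoColor', tb_offset=1, include_vars=False)
        print(vtb.text(*sys.exc_info()))      # drops run()'s own frame

run("def f():\n    return 1 / 0\nf()")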
886 | def format_records(self, records, last_unique, recursion_repeat): |
|
883 | def format_records(self, records, last_unique, recursion_repeat): | |
887 | """Format the stack frames of the traceback""" |
|
884 | """Format the stack frames of the traceback""" | |
888 | frames = [] |
|
885 | frames = [] | |
889 | for r in records[:last_unique+recursion_repeat+1]: |
|
886 | for r in records[:last_unique+recursion_repeat+1]: | |
890 | #print '*** record:',file,lnum,func,lines,index # dbg |
|
887 | #print '*** record:',file,lnum,func,lines,index # dbg | |
891 | frames.append(self.format_record(*r)) |
|
888 | frames.append(self.format_record(*r)) | |
892 |
|
889 | |||
893 | if recursion_repeat: |
|
890 | if recursion_repeat: | |
894 | frames.append('... last %d frames repeated, from the frame below ...\n' % recursion_repeat) |
|
891 | frames.append('... last %d frames repeated, from the frame below ...\n' % recursion_repeat) | |
895 | frames.append(self.format_record(*records[last_unique+recursion_repeat+1])) |
|
892 | frames.append(self.format_record(*records[last_unique+recursion_repeat+1])) | |
896 |
|
893 | |||
897 | return frames |
|
894 | return frames | |
898 |
|
895 | |||
899 | def format_record(self, frame, file, lnum, func, lines, index): |
|
896 | def format_record(self, frame, file, lnum, func, lines, index): | |
900 | """Format a single stack frame""" |
|
897 | """Format a single stack frame""" | |
901 | Colors = self.Colors # just a shorthand + quicker name lookup |
|
898 | Colors = self.Colors # just a shorthand + quicker name lookup | |
902 | ColorsNormal = Colors.Normal # used a lot |
|
899 | ColorsNormal = Colors.Normal # used a lot | |
903 | col_scheme = self.color_scheme_table.active_scheme_name |
|
900 | col_scheme = self.color_scheme_table.active_scheme_name | |
904 | indent = ' ' * INDENT_SIZE |
|
901 | indent = ' ' * INDENT_SIZE | |
905 | em_normal = '%s\n%s%s' % (Colors.valEm, indent, ColorsNormal) |
|
902 | em_normal = '%s\n%s%s' % (Colors.valEm, indent, ColorsNormal) | |
906 | undefined = '%sundefined%s' % (Colors.em, ColorsNormal) |
|
903 | undefined = '%sundefined%s' % (Colors.em, ColorsNormal) | |
907 | tpl_link = '%s%%s%s' % (Colors.filenameEm, ColorsNormal) |
|
904 | tpl_link = '%s%%s%s' % (Colors.filenameEm, ColorsNormal) | |
908 | tpl_call = 'in %s%%s%s%%s%s' % (Colors.vName, Colors.valEm, |
|
905 | tpl_call = 'in %s%%s%s%%s%s' % (Colors.vName, Colors.valEm, | |
909 | ColorsNormal) |
|
906 | ColorsNormal) | |
910 | tpl_call_fail = 'in %s%%s%s(***failed resolving arguments***)%s' % \ |
|
907 | tpl_call_fail = 'in %s%%s%s(***failed resolving arguments***)%s' % \ | |
911 | (Colors.vName, Colors.valEm, ColorsNormal) |
|
908 | (Colors.vName, Colors.valEm, ColorsNormal) | |
912 | tpl_local_var = '%s%%s%s' % (Colors.vName, ColorsNormal) |
|
909 | tpl_local_var = '%s%%s%s' % (Colors.vName, ColorsNormal) | |
913 | tpl_global_var = '%sglobal%s %s%%s%s' % (Colors.em, ColorsNormal, |
|
910 | tpl_global_var = '%sglobal%s %s%%s%s' % (Colors.em, ColorsNormal, | |
914 | Colors.vName, ColorsNormal) |
|
911 | Colors.vName, ColorsNormal) | |
915 | tpl_name_val = '%%s %s= %%s%s' % (Colors.valEm, ColorsNormal) |
|
912 | tpl_name_val = '%%s %s= %%s%s' % (Colors.valEm, ColorsNormal) | |
916 |
|
913 | |||
917 | if not file: |
|
914 | if not file: | |
918 | file = '?' |
|
915 | file = '?' | |
919 | elif file.startswith(str("<")) and file.endswith(str(">")): |
|
916 | elif file.startswith(str("<")) and file.endswith(str(">")): | |
920 | # Not a real filename, no problem... |
|
917 | # Not a real filename, no problem... | |
921 | pass |
|
918 | pass | |
922 | elif not os.path.isabs(file): |
|
919 | elif not os.path.isabs(file): | |
923 | # Try to make the filename absolute by trying all |
|
920 | # Try to make the filename absolute by trying all | |
924 | # sys.path entries (which is also what linecache does) |
|
921 | # sys.path entries (which is also what linecache does) | |
925 | for dirname in sys.path: |
|
922 | for dirname in sys.path: | |
926 | try: |
|
923 | try: | |
927 | fullname = os.path.join(dirname, file) |
|
924 | fullname = os.path.join(dirname, file) | |
928 | if os.path.isfile(fullname): |
|
925 | if os.path.isfile(fullname): | |
929 | file = os.path.abspath(fullname) |
|
926 | file = os.path.abspath(fullname) | |
930 | break |
|
927 | break | |
931 | except Exception: |
|
928 | except Exception: | |
932 | # Just in case that sys.path contains very |
|
929 | # Just in case that sys.path contains very | |
933 | # strange entries... |
|
930 | # strange entries... | |
934 | pass |
|
931 | pass | |
935 |
|
932 | |||
936 | file = py3compat.cast_unicode(file, util_path.fs_encoding) |
|
933 | file = py3compat.cast_unicode(file, util_path.fs_encoding) | |
937 | link = tpl_link % util_path.compress_user(file) |
|
934 | link = tpl_link % util_path.compress_user(file) | |
938 | args, varargs, varkw, locals_ = inspect.getargvalues(frame) |
|
935 | args, varargs, varkw, locals_ = inspect.getargvalues(frame) | |
939 |
|
936 | |||
940 | if func == '?': |
|
937 | if func == '?': | |
941 | call = '' |
|
938 | call = '' | |
942 | elif func == '<module>': |
|
939 | elif func == '<module>': | |
943 | call = tpl_call % (func, '') |
|
940 | call = tpl_call % (func, '') | |
944 | else: |
|
941 | else: | |
945 | # Decide whether to include variable details or not |
|
942 | # Decide whether to include variable details or not | |
946 | var_repr = eqrepr if self.include_vars else nullrepr |
|
943 | var_repr = eqrepr if self.include_vars else nullrepr | |
947 | try: |
|
944 | try: | |
948 | call = tpl_call % (func, inspect.formatargvalues(args, |
|
945 | call = tpl_call % (func, inspect.formatargvalues(args, | |
949 | varargs, varkw, |
|
946 | varargs, varkw, | |
950 | locals_, formatvalue=var_repr)) |
|
947 | locals_, formatvalue=var_repr)) | |
951 | except KeyError: |
|
948 | except KeyError: | |
952 | # This happens in situations like errors inside generator |
|
949 | # This happens in situations like errors inside generator | |
953 | # expressions, where local variables are listed in the |
|
950 | # expressions, where local variables are listed in the | |
954 | # line, but can't be extracted from the frame. I'm not |
|
951 | # line, but can't be extracted from the frame. I'm not | |
955 | # 100% sure this isn't actually a bug in inspect itself, |
|
952 | # 100% sure this isn't actually a bug in inspect itself, | |
956 | # but since there's no info for us to compute with, the |
|
953 | # but since there's no info for us to compute with, the | |
957 | # best we can do is report the failure and move on. Here |
|
954 | # best we can do is report the failure and move on. Here | |
958 | # we must *not* call any traceback construction again, |
|
955 | # we must *not* call any traceback construction again, | |
959 | # because that would mess up use of %debug later on. So we |
|
956 | # because that would mess up use of %debug later on. So we | |
960 | # simply report the failure and move on. The only |
|
957 | # simply report the failure and move on. The only | |
961 | # limitation will be that this frame won't have locals |
|
958 | # limitation will be that this frame won't have locals | |
962 | # listed in the call signature. Quite subtle problem... |
|
959 | # listed in the call signature. Quite subtle problem... | |
963 | # I can't think of a good way to validate this in a unit |
|
960 | # I can't think of a good way to validate this in a unit | |
964 | # test, but running a script consisting of: |
|
961 | # test, but running a script consisting of: | |
965 | # dict( (k,v.strip()) for (k,v) in range(10) ) |
|
962 | # dict( (k,v.strip()) for (k,v) in range(10) ) | |
966 | # will illustrate the error, if this exception catch is |
|
963 | # will illustrate the error, if this exception catch is | |
967 | # disabled. |
|
964 | # disabled. | |
968 | call = tpl_call_fail % func |
|
965 | call = tpl_call_fail % func | |
969 |
|
966 | |||
970 | # Don't attempt to tokenize binary files. |
|
967 | # Don't attempt to tokenize binary files. | |
971 | if file.endswith(('.so', '.pyd', '.dll')): |
|
968 | if file.endswith(('.so', '.pyd', '.dll')): | |
972 | return '%s %s\n' % (link, call) |
|
969 | return '%s %s\n' % (link, call) | |
973 |
|
970 | |||
974 | elif file.endswith(('.pyc', '.pyo')): |
|
971 | elif file.endswith(('.pyc', '.pyo')): | |
975 | # Look up the corresponding source file. |
|
972 | # Look up the corresponding source file. | |
976 | try: |
|
973 | try: | |
977 | file = source_from_cache(file) |
|
974 | file = source_from_cache(file) | |
978 | except ValueError: |
|
975 | except ValueError: | |
979 | # Failed to get the source file for some reason |
|
976 | # Failed to get the source file for some reason | |
980 | # E.g. https://github.com/ipython/ipython/issues/9486 |
|
977 | # E.g. https://github.com/ipython/ipython/issues/9486 | |
981 | return '%s %s\n' % (link, call) |
|
978 | return '%s %s\n' % (link, call) | |
982 |
|
979 | |||
983 | def linereader(file=file, lnum=[lnum], getline=linecache.getline): |
|
980 | def linereader(file=file, lnum=[lnum], getline=linecache.getline): | |
984 | line = getline(file, lnum[0]) |
|
981 | line = getline(file, lnum[0]) | |
985 | lnum[0] += 1 |
|
982 | lnum[0] += 1 | |
986 | return line |
|
983 | return line | |
987 |
|
984 | |||
988 | # Build the list of names on this line of code where the exception |
|
985 | # Build the list of names on this line of code where the exception | |
989 | # occurred. |
|
986 | # occurred. | |
990 | try: |
|
987 | try: | |
991 | names = [] |
|
988 | names = [] | |
992 | name_cont = False |
|
989 | name_cont = False | |
993 |
|
990 | |||
994 | for token_type, token, start, end, line in generate_tokens(linereader): |
|
991 | for token_type, token, start, end, line in generate_tokens(linereader): | |
995 | # build composite names |
|
992 | # build composite names | |
996 | if token_type == tokenize.NAME and token not in keyword.kwlist: |
|
993 | if token_type == tokenize.NAME and token not in keyword.kwlist: | |
997 | if name_cont: |
|
994 | if name_cont: | |
998 | # Continuation of a dotted name |
|
995 | # Continuation of a dotted name | |
999 | try: |
|
996 | try: | |
1000 | names[-1].append(token) |
|
997 | names[-1].append(token) | |
1001 | except IndexError: |
|
998 | except IndexError: | |
1002 | names.append([token]) |
|
999 | names.append([token]) | |
1003 | name_cont = False |
|
1000 | name_cont = False | |
1004 | else: |
|
1001 | else: | |
1005 | # Regular new names. We append everything, the caller |
|
1002 | # Regular new names. We append everything, the caller | |
1006 | # will be responsible for pruning the list later. It's |
|
1003 | # will be responsible for pruning the list later. It's | |
1007 | # very tricky to try to prune as we go, b/c composite |
|
1004 | # very tricky to try to prune as we go, b/c composite | |
1008 | # names can fool us. The pruning at the end is easy |
|
1005 | # names can fool us. The pruning at the end is easy | |
1009 | # to do (or the caller can print a list with repeated |
|
1006 | # to do (or the caller can print a list with repeated | |
1010 |                         # names if so desired). |
|
1007 |                         # names if so desired). | |
1011 | names.append([token]) |
|
1008 | names.append([token]) | |
1012 | elif token == '.': |
|
1009 | elif token == '.': | |
1013 | name_cont = True |
|
1010 | name_cont = True | |
1014 | elif token_type == tokenize.NEWLINE: |
|
1011 | elif token_type == tokenize.NEWLINE: | |
1015 | break |
|
1012 | break | |
1016 |
|
1013 | |||
1017 | except (IndexError, UnicodeDecodeError, SyntaxError): |
|
1014 | except (IndexError, UnicodeDecodeError, SyntaxError): | |
1018 | # signals exit of tokenizer |
|
1015 | # signals exit of tokenizer | |
1019 | # SyntaxError can occur if the file is not actually Python |
|
1016 | # SyntaxError can occur if the file is not actually Python | |
1020 | # - see gh-6300 |
|
1017 | # - see gh-6300 | |
1021 | pass |
|
1018 | pass | |
1022 | except tokenize.TokenError as msg: |
|
1019 | except tokenize.TokenError as msg: | |
1023 | # Tokenizing may fail for various reasons, many of which are |
|
1020 | # Tokenizing may fail for various reasons, many of which are | |
1024 | # harmless. (A good example is when the line in question is the |
|
1021 | # harmless. (A good example is when the line in question is the | |
1025 | # close of a triple-quoted string, cf gh-6864). We don't want to |
|
1022 | # close of a triple-quoted string, cf gh-6864). We don't want to | |
1026 |             # show this to users, but want to make it available for debugging |
|
1023 |             # show this to users, but want to make it available for debugging | |
1027 | # purposes. |
|
1024 | # purposes. | |
1028 | _m = ("An unexpected error occurred while tokenizing input\n" |
|
1025 | _m = ("An unexpected error occurred while tokenizing input\n" | |
1029 | "The following traceback may be corrupted or invalid\n" |
|
1026 | "The following traceback may be corrupted or invalid\n" | |
1030 | "The error message is: %s\n" % msg) |
|
1027 | "The error message is: %s\n" % msg) | |
1031 | debug(_m) |
|
1028 | debug(_m) | |
1032 |
|
1029 | |||
1033 | # Join composite names (e.g. "dict.fromkeys") |
|
1030 | # Join composite names (e.g. "dict.fromkeys") | |
1034 | names = ['.'.join(n) for n in names] |
|
1031 | names = ['.'.join(n) for n in names] | |
1035 | # prune names list of duplicates, but keep the right order |
|
1032 | # prune names list of duplicates, but keep the right order | |
1036 | unique_names = uniq_stable(names) |
|
1033 | unique_names = uniq_stable(names) | |
1037 |
|
1034 | |||
1038 | # Start loop over vars |
|
1035 | # Start loop over vars | |
1039 | lvals = '' |
|
1036 | lvals = '' | |
1040 | lvals_list = [] |
|
1037 | lvals_list = [] | |
1041 | if self.include_vars: |
|
1038 | if self.include_vars: | |
1042 | for name_full in unique_names: |
|
1039 | for name_full in unique_names: | |
1043 | name_base = name_full.split('.', 1)[0] |
|
1040 | name_base = name_full.split('.', 1)[0] | |
1044 | if name_base in frame.f_code.co_varnames: |
|
1041 | if name_base in frame.f_code.co_varnames: | |
1045 | if name_base in locals_: |
|
1042 | if name_base in locals_: | |
1046 | try: |
|
1043 | try: | |
1047 | value = repr(eval(name_full, locals_)) |
|
1044 | value = repr(eval(name_full, locals_)) | |
1048 | except: |
|
1045 | except: | |
1049 | value = undefined |
|
1046 | value = undefined | |
1050 | else: |
|
1047 | else: | |
1051 | value = undefined |
|
1048 | value = undefined | |
1052 | name = tpl_local_var % name_full |
|
1049 | name = tpl_local_var % name_full | |
1053 | else: |
|
1050 | else: | |
1054 | if name_base in frame.f_globals: |
|
1051 | if name_base in frame.f_globals: | |
1055 | try: |
|
1052 | try: | |
1056 | value = repr(eval(name_full, frame.f_globals)) |
|
1053 | value = repr(eval(name_full, frame.f_globals)) | |
1057 | except: |
|
1054 | except: | |
1058 | value = undefined |
|
1055 | value = undefined | |
1059 | else: |
|
1056 | else: | |
1060 | value = undefined |
|
1057 | value = undefined | |
1061 | name = tpl_global_var % name_full |
|
1058 | name = tpl_global_var % name_full | |
1062 | lvals_list.append(tpl_name_val % (name, value)) |
|
1059 | lvals_list.append(tpl_name_val % (name, value)) | |
1063 | if lvals_list: |
|
1060 | if lvals_list: | |
1064 | lvals = '%s%s' % (indent, em_normal.join(lvals_list)) |
|
1061 | lvals = '%s%s' % (indent, em_normal.join(lvals_list)) | |
1065 |
|
1062 | |||
1066 | level = '%s %s\n' % (link, call) |
|
1063 | level = '%s %s\n' % (link, call) | |
1067 |
|
1064 | |||
1068 | if index is None: |
|
1065 | if index is None: | |
1069 | return level |
|
1066 | return level | |
1070 | else: |
|
1067 | else: | |
1071 | _line_format = PyColorize.Parser(style=col_scheme, parent=self).format2 |
|
1068 | _line_format = PyColorize.Parser(style=col_scheme, parent=self).format2 | |
1072 | return '%s%s' % (level, ''.join( |
|
1069 | return '%s%s' % (level, ''.join( | |
1073 | _format_traceback_lines(lnum, index, lines, Colors, lvals, |
|
1070 | _format_traceback_lines(lnum, index, lines, Colors, lvals, | |
1074 | _line_format))) |
|
1071 | _line_format))) | |
1075 |
|
1072 | |||
1076 | def prepare_header(self, etype, long_version=False): |
|
1073 | def prepare_header(self, etype, long_version=False): | |
1077 | colors = self.Colors # just a shorthand + quicker name lookup |
|
1074 | colors = self.Colors # just a shorthand + quicker name lookup | |
1078 | colorsnormal = colors.Normal # used a lot |
|
1075 | colorsnormal = colors.Normal # used a lot | |
1079 | exc = '%s%s%s' % (colors.excName, etype, colorsnormal) |
|
1076 | exc = '%s%s%s' % (colors.excName, etype, colorsnormal) | |
1080 | width = min(75, get_terminal_size()[0]) |
|
1077 | width = min(75, get_terminal_size()[0]) | |
1081 | if long_version: |
|
1078 | if long_version: | |
1082 | # Header with the exception type, python version, and date |
|
1079 | # Header with the exception type, python version, and date | |
1083 | pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable |
|
1080 | pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable | |
1084 | date = time.ctime(time.time()) |
|
1081 | date = time.ctime(time.time()) | |
1085 |
|
1082 | |||
1086 | head = '%s%s%s\n%s%s%s\n%s' % (colors.topline, '-' * width, colorsnormal, |
|
1083 | head = '%s%s%s\n%s%s%s\n%s' % (colors.topline, '-' * width, colorsnormal, | |
1087 | exc, ' ' * (width - len(str(etype)) - len(pyver)), |
|
1084 | exc, ' ' * (width - len(str(etype)) - len(pyver)), | |
1088 | pyver, date.rjust(width) ) |
|
1085 | pyver, date.rjust(width) ) | |
1089 | head += "\nA problem occurred executing Python code. Here is the sequence of function" \ |
|
1086 | head += "\nA problem occurred executing Python code. Here is the sequence of function" \ | |
1090 | "\ncalls leading up to the error, with the most recent (innermost) call last." |
|
1087 | "\ncalls leading up to the error, with the most recent (innermost) call last." | |
1091 | else: |
|
1088 | else: | |
1092 | # Simplified header |
|
1089 | # Simplified header | |
1093 | head = '%s%s' % (exc, 'Traceback (most recent call last)'. \ |
|
1090 | head = '%s%s' % (exc, 'Traceback (most recent call last)'. \ | |
1094 | rjust(width - len(str(etype))) ) |
|
1091 | rjust(width - len(str(etype))) ) | |
1095 |
|
1092 | |||
1096 | return head |
|
1093 | return head | |
1097 |
|
1094 | |||
1098 | def format_exception(self, etype, evalue): |
|
1095 | def format_exception(self, etype, evalue): | |
1099 | colors = self.Colors # just a shorthand + quicker name lookup |
|
1096 | colors = self.Colors # just a shorthand + quicker name lookup | |
1100 | colorsnormal = colors.Normal # used a lot |
|
1097 | colorsnormal = colors.Normal # used a lot | |
1101 | # Get (safely) a string form of the exception info |
|
1098 | # Get (safely) a string form of the exception info | |
1102 | try: |
|
1099 | try: | |
1103 | etype_str, evalue_str = map(str, (etype, evalue)) |
|
1100 | etype_str, evalue_str = map(str, (etype, evalue)) | |
1104 | except: |
|
1101 | except: | |
1105 | # User exception is improperly defined. |
|
1102 | # User exception is improperly defined. | |
1106 | etype, evalue = str, sys.exc_info()[:2] |
|
1103 | etype, evalue = str, sys.exc_info()[:2] | |
1107 | etype_str, evalue_str = map(str, (etype, evalue)) |
|
1104 | etype_str, evalue_str = map(str, (etype, evalue)) | |
1108 | # ... and format it |
|
1105 | # ... and format it | |
1109 | return ['%s%s%s: %s' % (colors.excName, etype_str, |
|
1106 | return ['%s%s%s: %s' % (colors.excName, etype_str, | |
1110 | colorsnormal, py3compat.cast_unicode(evalue_str))] |
|
1107 | colorsnormal, py3compat.cast_unicode(evalue_str))] | |
1111 |
|
1108 | |||
1112 | def format_exception_as_a_whole(self, etype, evalue, etb, number_of_lines_of_context, tb_offset): |
|
1109 | def format_exception_as_a_whole(self, etype, evalue, etb, number_of_lines_of_context, tb_offset): | |
1113 | """Formats the header, traceback and exception message for a single exception. |
|
1110 | """Formats the header, traceback and exception message for a single exception. | |
1114 |
|
1111 | |||
1115 | This may be called multiple times by Python 3 exception chaining |
|
1112 | This may be called multiple times by Python 3 exception chaining | |
1116 | (PEP 3134). |
|
1113 | (PEP 3134). | |
1117 | """ |
|
1114 | """ | |
1118 | # some locals |
|
1115 | # some locals | |
1119 | orig_etype = etype |
|
1116 | orig_etype = etype | |
1120 | try: |
|
1117 | try: | |
1121 | etype = etype.__name__ |
|
1118 | etype = etype.__name__ | |
1122 | except AttributeError: |
|
1119 | except AttributeError: | |
1123 | pass |
|
1120 | pass | |
1124 |
|
1121 | |||
1125 | tb_offset = self.tb_offset if tb_offset is None else tb_offset |
|
1122 | tb_offset = self.tb_offset if tb_offset is None else tb_offset | |
1126 | head = self.prepare_header(etype, self.long_header) |
|
1123 | head = self.prepare_header(etype, self.long_header) | |
1127 | records = self.get_records(etb, number_of_lines_of_context, tb_offset) |
|
1124 | records = self.get_records(etb, number_of_lines_of_context, tb_offset) | |
1128 |
|
1125 | |||
1129 | if records is None: |
|
1126 | if records is None: | |
1130 | return "" |
|
1127 | return "" | |
1131 |
|
1128 | |||
1132 | last_unique, recursion_repeat = find_recursion(orig_etype, evalue, records) |
|
1129 | last_unique, recursion_repeat = find_recursion(orig_etype, evalue, records) | |
1133 |
|
1130 | |||
1134 | frames = self.format_records(records, last_unique, recursion_repeat) |
|
1131 | frames = self.format_records(records, last_unique, recursion_repeat) | |
1135 |
|
1132 | |||
1136 | formatted_exception = self.format_exception(etype, evalue) |
|
1133 | formatted_exception = self.format_exception(etype, evalue) | |
1137 | if records: |
|
1134 | if records: | |
1138 | filepath, lnum = records[-1][1:3] |
|
1135 | filepath, lnum = records[-1][1:3] | |
1139 | filepath = os.path.abspath(filepath) |
|
1136 | filepath = os.path.abspath(filepath) | |
1140 | ipinst = get_ipython() |
|
1137 | ipinst = get_ipython() | |
1141 | if ipinst is not None: |
|
1138 | if ipinst is not None: | |
1142 | ipinst.hooks.synchronize_with_editor(filepath, lnum, 0) |
|
1139 | ipinst.hooks.synchronize_with_editor(filepath, lnum, 0) | |
1143 |
|
1140 | |||
1144 | return [[head] + frames + [''.join(formatted_exception[0])]] |
|
1141 | return [[head] + frames + [''.join(formatted_exception[0])]] | |
1145 |
|
1142 | |||
1146 | def get_records(self, etb, number_of_lines_of_context, tb_offset): |
|
1143 | def get_records(self, etb, number_of_lines_of_context, tb_offset): | |
1147 | try: |
|
1144 | try: | |
1148 | # Try the default getinnerframes and Alex's: Alex's fixes some |
|
1145 | # Try the default getinnerframes and Alex's: Alex's fixes some | |
1149 | # problems, but it generates empty tracebacks for console errors |
|
1146 | # problems, but it generates empty tracebacks for console errors | |
1150 |             # (5 blank lines) where none should be returned. |
|
1147 |             # (5 blank lines) where none should be returned. | |
1151 | return _fixed_getinnerframes(etb, number_of_lines_of_context, tb_offset) |
|
1148 | return _fixed_getinnerframes(etb, number_of_lines_of_context, tb_offset) | |
1152 | except UnicodeDecodeError: |
|
1149 | except UnicodeDecodeError: | |
1153 | # This can occur if a file's encoding magic comment is wrong. |
|
1150 | # This can occur if a file's encoding magic comment is wrong. | |
1154 | # I can't see a way to recover without duplicating a bunch of code |
|
1151 | # I can't see a way to recover without duplicating a bunch of code | |
1155 | # from the stdlib traceback module. --TK |
|
1152 | # from the stdlib traceback module. --TK | |
1156 | error('\nUnicodeDecodeError while processing traceback.\n') |
|
1153 | error('\nUnicodeDecodeError while processing traceback.\n') | |
1157 | return None |
|
1154 | return None | |
1158 | except: |
|
1155 | except: | |
1159 | # FIXME: I've been getting many crash reports from python 2.3 |
|
1156 | # FIXME: I've been getting many crash reports from python 2.3 | |
1160 | # users, traceable to inspect.py. If I can find a small test-case |
|
1157 | # users, traceable to inspect.py. If I can find a small test-case | |
1161 | # to reproduce this, I should either write a better workaround or |
|
1158 | # to reproduce this, I should either write a better workaround or | |
1162 | # file a bug report against inspect (if that's the real problem). |
|
1159 | # file a bug report against inspect (if that's the real problem). | |
1163 | # So far, I haven't been able to find an isolated example to |
|
1160 | # So far, I haven't been able to find an isolated example to | |
1164 | # reproduce the problem. |
|
1161 | # reproduce the problem. | |
1165 | inspect_error() |
|
1162 | inspect_error() | |
1166 | traceback.print_exc(file=self.ostream) |
|
1163 | traceback.print_exc(file=self.ostream) | |
1167 | info('\nUnfortunately, your original traceback can not be constructed.\n') |
|
1164 | info('\nUnfortunately, your original traceback can not be constructed.\n') | |
1168 | return None |
|
1165 | return None | |
1169 |
|
1166 | |||
1170 | def structured_traceback(self, etype, evalue, etb, tb_offset=None, |
|
1167 | def structured_traceback(self, etype, evalue, etb, tb_offset=None, | |
1171 | number_of_lines_of_context=5): |
|
1168 | number_of_lines_of_context=5): | |
1172 | """Return a nice text document describing the traceback.""" |
|
1169 | """Return a nice text document describing the traceback.""" | |
1173 |
|
1170 | |||
1174 | formatted_exception = self.format_exception_as_a_whole(etype, evalue, etb, number_of_lines_of_context, |
|
1171 | formatted_exception = self.format_exception_as_a_whole(etype, evalue, etb, number_of_lines_of_context, | |
1175 | tb_offset) |
|
1172 | tb_offset) | |
1176 |
|
1173 | |||
1177 | colors = self.Colors # just a shorthand + quicker name lookup |
|
1174 | colors = self.Colors # just a shorthand + quicker name lookup | |
1178 | colorsnormal = colors.Normal # used a lot |
|
1175 | colorsnormal = colors.Normal # used a lot | |
1179 | head = '%s%s%s' % (colors.topline, '-' * min(75, get_terminal_size()[0]), colorsnormal) |
|
1176 | head = '%s%s%s' % (colors.topline, '-' * min(75, get_terminal_size()[0]), colorsnormal) | |
1180 | structured_traceback_parts = [head] |
|
1177 | structured_traceback_parts = [head] | |
1181 | chained_exceptions_tb_offset = 0 |
|
1178 | chained_exceptions_tb_offset = 0 | |
1182 | lines_of_context = 3 |
|
1179 | lines_of_context = 3 | |
1183 | formatted_exceptions = formatted_exception |
|
1180 | formatted_exceptions = formatted_exception | |
1184 | exception = self.get_parts_of_chained_exception(evalue) |
|
1181 | exception = self.get_parts_of_chained_exception(evalue) | |
1185 | if exception: |
|
1182 | if exception: | |
1186 | formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__) |
|
1183 | formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__) | |
1187 | etype, evalue, etb = exception |
|
1184 | etype, evalue, etb = exception | |
1188 | else: |
|
1185 | else: | |
1189 | evalue = None |
|
1186 | evalue = None | |
1190 | chained_exc_ids = set() |
|
1187 | chained_exc_ids = set() | |
1191 | while evalue: |
|
1188 | while evalue: | |
1192 | formatted_exceptions += self.format_exception_as_a_whole(etype, evalue, etb, lines_of_context, |
|
1189 | formatted_exceptions += self.format_exception_as_a_whole(etype, evalue, etb, lines_of_context, | |
1193 | chained_exceptions_tb_offset) |
|
1190 | chained_exceptions_tb_offset) | |
1194 | exception = self.get_parts_of_chained_exception(evalue) |
|
1191 | exception = self.get_parts_of_chained_exception(evalue) | |
1195 |
|
1192 | |||
1196 | if exception and not id(exception[1]) in chained_exc_ids: |
|
1193 | if exception and not id(exception[1]) in chained_exc_ids: | |
1197 | chained_exc_ids.add(id(exception[1])) # trace exception to avoid infinite 'cause' loop |
|
1194 | chained_exc_ids.add(id(exception[1])) # trace exception to avoid infinite 'cause' loop | |
1198 | formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__) |
|
1195 | formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__) | |
1199 | etype, evalue, etb = exception |
|
1196 | etype, evalue, etb = exception | |
1200 | else: |
|
1197 | else: | |
1201 | evalue = None |
|
1198 | evalue = None | |
1202 |
|
1199 | |||
1200 |         # we want to see exceptions in a reversed order:
1201 |         # the first exception should be on top
1202 |         for formatted_exception in reversed(formatted_exceptions):
1203 |             structured_traceback_parts += formatted_exception
1204 |
1205 |         return structured_traceback_parts
1206 |
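The reversal above recreates the ordering Python itself uses for chained exceptions: the root cause is printed first, and the exception that was finally caught comes last. A minimal stdlib-only sketch of that ordering (not part of this module)::

    try:
        try:
            {}['missing']
        except KeyError as err:
            raise ValueError("lookup failed") from err
    except ValueError:
        import traceback
        traceback.print_exc()
        # The KeyError block is printed first, then
        # "The above exception was the direct cause of ...", then the ValueError.
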
1210 | def debugger(self, force=False): |
|
1207 | def debugger(self, force=False): | |
1211 | """Call up the pdb debugger if desired, always clean up the tb |
|
1208 | """Call up the pdb debugger if desired, always clean up the tb | |
1212 | reference. |
|
1209 | reference. | |
1213 |
|
1210 | |||
1214 | Keywords: |
|
1211 | Keywords: | |
1215 |
|
1212 | |||
1216 | - force(False): by default, this routine checks the instance call_pdb |
|
1213 | - force(False): by default, this routine checks the instance call_pdb | |
1217 | flag and does not actually invoke the debugger if the flag is false. |
|
1214 | flag and does not actually invoke the debugger if the flag is false. | |
1218 | The 'force' option forces the debugger to activate even if the flag |
|
1215 | The 'force' option forces the debugger to activate even if the flag | |
1219 | is false. |
|
1216 | is false. | |
1220 |
|
1217 | |||
1221 | If the call_pdb flag is set, the pdb interactive debugger is |
|
1218 | If the call_pdb flag is set, the pdb interactive debugger is | |
1222 | invoked. In all cases, the self.tb reference to the current traceback |
|
1219 | invoked. In all cases, the self.tb reference to the current traceback | |
1223 | is deleted to prevent lingering references which hamper memory |
|
1220 | is deleted to prevent lingering references which hamper memory | |
1224 | management. |
|
1221 | management. | |
1225 |
|
1222 | |||
1226 | Note that each call to pdb() does an 'import readline', so if your app |
|
1223 | Note that each call to pdb() does an 'import readline', so if your app | |
1227 | requires a special setup for the readline completers, you'll have to |
|
1224 | requires a special setup for the readline completers, you'll have to | |
1228 | fix that by hand after invoking the exception handler.""" |
|
1225 | fix that by hand after invoking the exception handler.""" | |
1229 |
|
1226 | |||
1230 | if force or self.call_pdb: |
|
1227 | if force or self.call_pdb: | |
1231 | if self.pdb is None: |
|
1228 | if self.pdb is None: | |
1232 | self.pdb = self.debugger_cls() |
|
1229 | self.pdb = self.debugger_cls() | |
1233 | # the system displayhook may have changed, restore the original |
|
1230 | # the system displayhook may have changed, restore the original | |
1234 | # for pdb |
|
1231 | # for pdb | |
1235 | display_trap = DisplayTrap(hook=sys.__displayhook__) |
|
1232 | display_trap = DisplayTrap(hook=sys.__displayhook__) | |
1236 | with display_trap: |
|
1233 | with display_trap: | |
1237 | self.pdb.reset() |
|
1234 | self.pdb.reset() | |
1238 | # Find the right frame so we don't pop up inside ipython itself |
|
1235 | # Find the right frame so we don't pop up inside ipython itself | |
1239 | if hasattr(self, 'tb') and self.tb is not None: |
|
1236 | if hasattr(self, 'tb') and self.tb is not None: | |
1240 | etb = self.tb |
|
1237 | etb = self.tb | |
1241 | else: |
|
1238 | else: | |
1242 | etb = self.tb = sys.last_traceback |
|
1239 | etb = self.tb = sys.last_traceback | |
1243 | while self.tb is not None and self.tb.tb_next is not None: |
|
1240 | while self.tb is not None and self.tb.tb_next is not None: | |
1244 | self.tb = self.tb.tb_next |
|
1241 | self.tb = self.tb.tb_next | |
1245 | if etb and etb.tb_next: |
|
1242 | if etb and etb.tb_next: | |
1246 | etb = etb.tb_next |
|
1243 | etb = etb.tb_next | |
1247 | self.pdb.botframe = etb.tb_frame |
|
1244 | self.pdb.botframe = etb.tb_frame | |
1248 | self.pdb.interaction(None, etb) |
|
1245 | self.pdb.interaction(None, etb) | |
1249 |
|
1246 | |||
1250 | if hasattr(self, 'tb'): |
|
1247 | if hasattr(self, 'tb'): | |
1251 | del self.tb |
|
1248 | del self.tb | |
1252 |
|
1249 | |||
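A small, hedged sketch of the `call_pdb` behaviour documented above; per the docstring, `debugger(force=True)` would override a False `call_pdb`. The failing division is only a stand-in::

    import sys
    from IPython.core.ultratb import VerboseTB

    vtb = VerboseTB(call_pdb=False)   # with call_pdb=True, pdb would open after printing
    try:
        1 / 0
    except ZeroDivisionError:
        vtb(*sys.exc_info())          # prints the verbose traceback; debugger() stays quiet
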
1253 | def handler(self, info=None): |
|
1250 | def handler(self, info=None): | |
1254 | (etype, evalue, etb) = info or sys.exc_info() |
|
1251 | (etype, evalue, etb) = info or sys.exc_info() | |
1255 | self.tb = etb |
|
1252 | self.tb = etb | |
1256 | ostream = self.ostream |
|
1253 | ostream = self.ostream | |
1257 | ostream.flush() |
|
1254 | ostream.flush() | |
1258 | ostream.write(self.text(etype, evalue, etb)) |
|
1255 | ostream.write(self.text(etype, evalue, etb)) | |
1259 | ostream.write('\n') |
|
1256 | ostream.write('\n') | |
1260 | ostream.flush() |
|
1257 | ostream.flush() | |
1261 |
|
1258 | |||
1259 |     # Changed so an instance can just be called as VerboseTB_inst() and print
1260 |     # out the right info on its own.
1261 |     def __call__(self, etype=None, evalue=None, etb=None):
1262 |         """This hook can replace sys.excepthook (for Python 2.1 or higher)."""
1263 |         if etb is None:
1264 |             self.handler()
1265 |         else:
1266 |             self.handler((etype, evalue, etb))
1267 |         try:
1268 |             self.debugger()
1269 |         except KeyboardInterrupt:
1270 |             print("\nKeyboardInterrupt")
1271 |
1272 |
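As the `__call__` docstring above notes, an instance can serve directly as `sys.excepthook`. A minimal sketch, with the colour scheme chosen purely as an example::

    import sys
    from IPython.core.ultratb import VerboseTB

    sys.excepthook = VerboseTB(color_scheme='Linux', include_vars=True)

    def boom():
        return undefined_name   # this NameError is now reported in the verbose format

    boom()
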
1276 | #---------------------------------------------------------------------------- |
|
1273 | #---------------------------------------------------------------------------- | |
1277 | class FormattedTB(VerboseTB, ListTB): |
|
1274 | class FormattedTB(VerboseTB, ListTB): | |
1278 | """Subclass ListTB but allow calling with a traceback. |
|
1275 | """Subclass ListTB but allow calling with a traceback. | |
1279 |
|
1276 | |||
1280 | It can thus be used as a sys.excepthook for Python > 2.1. |
|
1277 | It can thus be used as a sys.excepthook for Python > 2.1. | |
1281 |
|
1278 | |||
1282 | Also adds 'Context' and 'Verbose' modes, not available in ListTB. |
|
1279 | Also adds 'Context' and 'Verbose' modes, not available in ListTB. | |
1283 |
|
1280 | |||
1284 | Allows a tb_offset to be specified. This is useful for situations where |
|
1281 | Allows a tb_offset to be specified. This is useful for situations where | |
1285 | one needs to remove a number of topmost frames from the traceback (such as |
|
1282 | one needs to remove a number of topmost frames from the traceback (such as | |
1286 | occurs with python programs that themselves execute other python code, |
|
1283 | occurs with python programs that themselves execute other python code, | |
1287 | like Python shells). """ |
|
1284 | like Python shells). """ | |
1288 |
|
1285 | |||
1289 | def __init__(self, mode='Plain', color_scheme='Linux', call_pdb=False, |
|
1286 | def __init__(self, mode='Plain', color_scheme='Linux', call_pdb=False, | |
1290 | ostream=None, |
|
1287 | ostream=None, | |
1291 | tb_offset=0, long_header=False, include_vars=False, |
|
1288 | tb_offset=0, long_header=False, include_vars=False, | |
1292 | check_cache=None, debugger_cls=None, |
|
1289 | check_cache=None, debugger_cls=None, | |
1293 | parent=None, config=None): |
|
1290 | parent=None, config=None): | |
1294 |
|
1291 | |||
1295 | # NEVER change the order of this list. Put new modes at the end: |
|
1292 | # NEVER change the order of this list. Put new modes at the end: | |
1296 | self.valid_modes = ['Plain', 'Context', 'Verbose', 'Minimal'] |
|
1293 | self.valid_modes = ['Plain', 'Context', 'Verbose', 'Minimal'] | |
1297 | self.verbose_modes = self.valid_modes[1:3] |
|
1294 | self.verbose_modes = self.valid_modes[1:3] | |
1298 |
|
1295 | |||
1299 | VerboseTB.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb, |
|
1296 | VerboseTB.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb, | |
1300 | ostream=ostream, tb_offset=tb_offset, |
|
1297 | ostream=ostream, tb_offset=tb_offset, | |
1301 | long_header=long_header, include_vars=include_vars, |
|
1298 | long_header=long_header, include_vars=include_vars, | |
1302 | check_cache=check_cache, debugger_cls=debugger_cls, |
|
1299 | check_cache=check_cache, debugger_cls=debugger_cls, | |
1303 | parent=parent, config=config) |
|
1300 | parent=parent, config=config) | |
1304 |
|
1301 | |||
1305 | # Different types of tracebacks are joined with different separators to |
|
1302 | # Different types of tracebacks are joined with different separators to | |
1306 | # form a single string. They are taken from this dict |
|
1303 | # form a single string. They are taken from this dict | |
1307 | self._join_chars = dict(Plain='', Context='\n', Verbose='\n', |
|
1304 | self._join_chars = dict(Plain='', Context='\n', Verbose='\n', | |
1308 | Minimal='') |
|
1305 | Minimal='') | |
1309 | # set_mode also sets the tb_join_char attribute |
|
1306 | # set_mode also sets the tb_join_char attribute | |
1310 | self.set_mode(mode) |
|
1307 | self.set_mode(mode) | |
1311 |
|
1308 | |||
1312 | def structured_traceback(self, etype, value, tb, tb_offset=None, number_of_lines_of_context=5): |
|
1309 | def structured_traceback(self, etype, value, tb, tb_offset=None, number_of_lines_of_context=5): | |
1313 | tb_offset = self.tb_offset if tb_offset is None else tb_offset |
|
1310 | tb_offset = self.tb_offset if tb_offset is None else tb_offset | |
1314 | mode = self.mode |
|
1311 | mode = self.mode | |
1315 | if mode in self.verbose_modes: |
|
1312 | if mode in self.verbose_modes: | |
1316 | # Verbose modes need a full traceback |
|
1313 | # Verbose modes need a full traceback | |
1317 | return VerboseTB.structured_traceback( |
|
1314 | return VerboseTB.structured_traceback( | |
1318 | self, etype, value, tb, tb_offset, number_of_lines_of_context |
|
1315 | self, etype, value, tb, tb_offset, number_of_lines_of_context | |
1319 | ) |
|
1316 | ) | |
1320 | elif mode == 'Minimal': |
|
1317 | elif mode == 'Minimal': | |
1321 | return ListTB.get_exception_only(self, etype, value) |
|
1318 | return ListTB.get_exception_only(self, etype, value) | |
1322 | else: |
|
1319 | else: | |
1323 | # We must check the source cache because otherwise we can print |
|
1320 | # We must check the source cache because otherwise we can print | |
1324 | # out-of-date source code. |
|
1321 | # out-of-date source code. | |
1325 | self.check_cache() |
|
1322 | self.check_cache() | |
1326 | # Now we can extract and format the exception |
|
1323 | # Now we can extract and format the exception | |
1327 | return ListTB.structured_traceback( |
|
1324 | return ListTB.structured_traceback( | |
1328 | self, etype, value, tb, tb_offset, number_of_lines_of_context |
|
1325 | self, etype, value, tb, tb_offset, number_of_lines_of_context | |
1329 | ) |
|
1326 | ) | |
1330 |
|
1327 | |||
1331 | def stb2text(self, stb): |
|
1328 | def stb2text(self, stb): | |
1332 | """Convert a structured traceback (a list) to a string.""" |
|
1329 | """Convert a structured traceback (a list) to a string.""" | |
1333 | return self.tb_join_char.join(stb) |
|
1330 | return self.tb_join_char.join(stb) | |
1334 |
|
1331 | |||
1335 |
|
1332 | |||
1336 | def set_mode(self, mode=None): |
|
1333 | def set_mode(self, mode=None): | |
1337 | """Switch to the desired mode. |
|
1334 | """Switch to the desired mode. | |
1338 |
|
1335 | |||
1339 | If mode is not specified, cycles through the available modes.""" |
|
1336 | If mode is not specified, cycles through the available modes.""" | |
1340 |
|
1337 | |||
1341 | if not mode: |
|
1338 | if not mode: | |
1342 | new_idx = (self.valid_modes.index(self.mode) + 1 ) % \ |
|
1339 | new_idx = (self.valid_modes.index(self.mode) + 1 ) % \ | |
1343 | len(self.valid_modes) |
|
1340 | len(self.valid_modes) | |
1344 | self.mode = self.valid_modes[new_idx] |
|
1341 | self.mode = self.valid_modes[new_idx] | |
1345 | elif mode not in self.valid_modes: |
|
1342 | elif mode not in self.valid_modes: | |
1346 | raise ValueError('Unrecognized mode in FormattedTB: <' + mode + '>\n' |
|
1343 | raise ValueError('Unrecognized mode in FormattedTB: <' + mode + '>\n' | |
1347 | 'Valid modes: ' + str(self.valid_modes)) |
|
1344 | 'Valid modes: ' + str(self.valid_modes)) | |
1348 | else: |
|
1345 | else: | |
1349 | self.mode = mode |
|
1346 | self.mode = mode | |
1350 | # include variable details only in 'Verbose' mode |
|
1347 | # include variable details only in 'Verbose' mode | |
1351 | self.include_vars = (self.mode == self.valid_modes[2]) |
|
1348 | self.include_vars = (self.mode == self.valid_modes[2]) | |
1352 | # Set the join character for generating text tracebacks |
|
1349 | # Set the join character for generating text tracebacks | |
1353 | self.tb_join_char = self._join_chars[self.mode] |
|
1350 | self.tb_join_char = self._join_chars[self.mode] | |
1354 |
|
1351 | |||
1352 |     # some convenient shortcuts
1353 |     def plain(self):
1354 |         self.set_mode(self.valid_modes[0])
1355 |
1356 |     def context(self):
1357 |         self.set_mode(self.valid_modes[1])
1358 |
1359 |     def verbose(self):
1360 |         self.set_mode(self.valid_modes[2])
1361 |
1362 |     def minimal(self):
1363 |         self.set_mode(self.valid_modes[3])
1364 |
1365 |
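A hedged sketch of the mode machinery above: `set_mode()` without an argument cycles through `valid_modes`, and the shortcuts jump straight to a specific mode::

    from IPython.core.ultratb import FormattedTB

    ftb = FormattedTB(mode='Plain', color_scheme='NoColor')
    print(ftb.mode)          # 'Plain'
    ftb.set_mode()           # no argument: advance to the next entry in valid_modes
    print(ftb.mode)          # 'Context'
    ftb.verbose()            # shortcut, equivalent to ftb.set_mode('Verbose')
    print(ftb.include_vars)  # True -- only Verbose mode shows local variables
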
1369 | #---------------------------------------------------------------------------- |
|
1366 | #---------------------------------------------------------------------------- | |
1370 | class AutoFormattedTB(FormattedTB): |
|
1367 | class AutoFormattedTB(FormattedTB): | |
1371 | """A traceback printer which can be called on the fly. |
|
1368 | """A traceback printer which can be called on the fly. | |
1372 |
|
1369 | |||
1373 | It will find out about exceptions by itself. |
|
1370 | It will find out about exceptions by itself. | |
1374 |
|
1371 | |||
1375 | A brief example:: |
|
1372 | A brief example:: | |
1376 |
|
1373 | |||
1377 | AutoTB = AutoFormattedTB(mode = 'Verbose',color_scheme='Linux') |
|
1374 | AutoTB = AutoFormattedTB(mode = 'Verbose',color_scheme='Linux') | |
1378 | try: |
|
1375 | try: | |
1379 | ... |
|
1376 | ... | |
1380 | except: |
|
1377 | except: | |
1381 | AutoTB() # or AutoTB(out=logfile) where logfile is an open file object |
|
1378 | AutoTB() # or AutoTB(out=logfile) where logfile is an open file object | |
1382 | """ |
|
1379 | """ | |
1383 |
|
1380 | |||
1384 | def __call__(self, etype=None, evalue=None, etb=None, |
|
1381 | def __call__(self, etype=None, evalue=None, etb=None, | |
1385 | out=None, tb_offset=None): |
|
1382 | out=None, tb_offset=None): | |
1386 | """Print out a formatted exception traceback. |
|
1383 | """Print out a formatted exception traceback. | |
1387 |
|
1384 | |||
1388 | Optional arguments: |
|
1385 | Optional arguments: | |
1389 | - out: an open file-like object to direct output to. |
|
1386 | - out: an open file-like object to direct output to. | |
1390 |
|
1387 | |||
1391 | - tb_offset: the number of frames to skip over in the stack, on a |
|
1388 | - tb_offset: the number of frames to skip over in the stack, on a | |
1392 | per-call basis (this overrides temporarily the instance's tb_offset |
|
1389 | per-call basis (this overrides temporarily the instance's tb_offset | |
1393 | given at initialization time. """ |
|
1390 | given at initialization time. """ | |
1394 |
|
1391 | |||
1395 | if out is None: |
|
1392 | if out is None: | |
1396 | out = self.ostream |
|
1393 | out = self.ostream | |
1397 | out.flush() |
|
1394 | out.flush() | |
1398 | out.write(self.text(etype, evalue, etb, tb_offset)) |
|
1395 | out.write(self.text(etype, evalue, etb, tb_offset)) | |
1399 | out.write('\n') |
|
1396 | out.write('\n') | |
1400 | out.flush() |
|
1397 | out.flush() | |
1401 | # FIXME: we should remove the auto pdb behavior from here and leave |
|
1398 | # FIXME: we should remove the auto pdb behavior from here and leave | |
1402 | # that to the clients. |
|
1399 | # that to the clients. | |
1403 | try: |
|
1400 | try: | |
1404 | self.debugger() |
|
1401 | self.debugger() | |
1405 | except KeyboardInterrupt: |
|
1402 | except KeyboardInterrupt: | |
1406 | print("\nKeyboardInterrupt") |
|
1403 | print("\nKeyboardInterrupt") | |
1407 |
|
1404 | |||
1408 | def structured_traceback(self, etype=None, value=None, tb=None, |
|
1405 | def structured_traceback(self, etype=None, value=None, tb=None, | |
1409 | tb_offset=None, number_of_lines_of_context=5): |
|
1406 | tb_offset=None, number_of_lines_of_context=5): | |
1410 | if etype is None: |
|
1407 | if etype is None: | |
1411 | etype, value, tb = sys.exc_info() |
|
1408 | etype, value, tb = sys.exc_info() | |
1412 | if isinstance(tb, tuple): |
|
1409 | if isinstance(tb, tuple): | |
1413 | # tb is a tuple if this is a chained exception. |
|
1410 | # tb is a tuple if this is a chained exception. | |
1414 | self.tb = tb[0] |
|
1411 | self.tb = tb[0] | |
1415 | else: |
|
1412 | else: | |
1416 | self.tb = tb |
|
1413 | self.tb = tb | |
1417 | return FormattedTB.structured_traceback( |
|
1414 | return FormattedTB.structured_traceback( | |
1418 | self, etype, value, tb, tb_offset, number_of_lines_of_context) |
|
1415 | self, etype, value, tb, tb_offset, number_of_lines_of_context) | |
1419 |
|
1416 | |||
1420 |
|
1417 | |||
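Complementing the docstring example earlier in the class, a hedged sketch of the per-call `out` argument accepted by `__call__`; the StringIO target is only an illustration::

    import io
    from IPython.core.ultratb import AutoFormattedTB

    AutoTB = AutoFormattedTB(mode='Context', color_scheme='NoColor')
    buf = io.StringIO()
    try:
        int('not a number')
    except ValueError:
        AutoTB(out=buf)          # picks the active exception up from sys.exc_info()
    print(buf.getvalue())        # the formatted traceback, captured instead of printed
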
1421 | #--------------------------------------------------------------------------- |
|
1418 | #--------------------------------------------------------------------------- | |
1422 |
|
1419 | |||
1423 | # A simple class to preserve Nathan's original functionality. |
|
1420 | # A simple class to preserve Nathan's original functionality. | |
1424 | class ColorTB(FormattedTB): |
|
1421 | class ColorTB(FormattedTB): | |
1425 | """Shorthand to initialize a FormattedTB in Linux colors mode.""" |
|
1422 | """Shorthand to initialize a FormattedTB in Linux colors mode.""" | |
1426 |
|
1423 | |||
1427 | def __init__(self, color_scheme='Linux', call_pdb=0, **kwargs): |
|
1424 | def __init__(self, color_scheme='Linux', call_pdb=0, **kwargs): | |
1428 | FormattedTB.__init__(self, color_scheme=color_scheme, |
|
1425 | FormattedTB.__init__(self, color_scheme=color_scheme, | |
1429 | call_pdb=call_pdb, **kwargs) |
|
1426 | call_pdb=call_pdb, **kwargs) | |
1430 |
|
1427 | |||
1431 |
|
1428 | |||
1432 | class SyntaxTB(ListTB): |
|
1429 | class SyntaxTB(ListTB): | |
1433 | """Extension which holds some state: the last exception value""" |
|
1430 | """Extension which holds some state: the last exception value""" | |
1434 |
|
1431 | |||
1435 | def __init__(self, color_scheme='NoColor', parent=None, config=None): |
|
1432 | def __init__(self, color_scheme='NoColor', parent=None, config=None): | |
1436 | ListTB.__init__(self, color_scheme, parent=parent, config=config) |
|
1433 | ListTB.__init__(self, color_scheme, parent=parent, config=config) | |
1437 | self.last_syntax_error = None |
|
1434 | self.last_syntax_error = None | |
1438 |
|
1435 | |||
1439 | def __call__(self, etype, value, elist): |
|
1436 | def __call__(self, etype, value, elist): | |
1440 | self.last_syntax_error = value |
|
1437 | self.last_syntax_error = value | |
1441 |
|
1438 | |||
1442 | ListTB.__call__(self, etype, value, elist) |
|
1439 | ListTB.__call__(self, etype, value, elist) | |
1443 |
|
1440 | |||
1444 | def structured_traceback(self, etype, value, elist, tb_offset=None, |
|
1441 | def structured_traceback(self, etype, value, elist, tb_offset=None, | |
1445 | context=5): |
|
1442 | context=5): | |
1446 | # If the source file has been edited, the line in the syntax error can |
|
1443 | # If the source file has been edited, the line in the syntax error can | |
1447 | # be wrong (retrieved from an outdated cache). This replaces it with |
|
1444 | # be wrong (retrieved from an outdated cache). This replaces it with | |
1448 | # the current value. |
|
1445 | # the current value. | |
1449 | if isinstance(value, SyntaxError) \ |
|
1446 | if isinstance(value, SyntaxError) \ | |
1450 | and isinstance(value.filename, str) \ |
|
1447 | and isinstance(value.filename, str) \ | |
1451 | and isinstance(value.lineno, int): |
|
1448 | and isinstance(value.lineno, int): | |
1452 | linecache.checkcache(value.filename) |
|
1449 | linecache.checkcache(value.filename) | |
1453 | newtext = linecache.getline(value.filename, value.lineno) |
|
1450 | newtext = linecache.getline(value.filename, value.lineno) | |
1454 | if newtext: |
|
1451 | if newtext: | |
1455 | value.text = newtext |
|
1452 | value.text = newtext | |
1456 | self.last_syntax_error = value |
|
1453 | self.last_syntax_error = value | |
1457 | return super(SyntaxTB, self).structured_traceback(etype, value, elist, |
|
1454 | return super(SyntaxTB, self).structured_traceback(etype, value, elist, | |
1458 | tb_offset=tb_offset, context=context) |
|
1455 | tb_offset=tb_offset, context=context) | |
1459 |
|
1456 | |||
1457 |     def clear_err_state(self):
1458 |         """Return the current error state and clear it"""
1459 |         e = self.last_syntax_error
1460 |         self.last_syntax_error = None
1461 |         return e
1462 |
1463 |     def stb2text(self, stb):
1464 |         """Convert a structured traceback (a list) to a string."""
1465 |         return ''.join(stb)
1466 |
1467 |
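A small, hedged illustration of the state described in the class docstring: `clear_err_state()` hands back the last recorded SyntaxError exactly once. Assigning `last_syntax_error` directly here stands in for what `__call__` normally does::

    from IPython.core.ultratb import SyntaxTB

    stb = SyntaxTB(color_scheme='NoColor')
    try:
        compile("def f(:\n    pass\n", "<example>", "exec")
    except SyntaxError as e:
        stb.last_syntax_error = e     # normally recorded by __call__/structured_traceback
    err = stb.clear_err_state()
    print(type(err).__name__)         # 'SyntaxError'
    print(stb.clear_err_state())      # None -- the stored state was cleared
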
1471 | # some internal-use functions |
|
1468 | # some internal-use functions | |
1472 | def text_repr(value): |
|
1469 | def text_repr(value): | |
1473 | """Hopefully pretty robust repr equivalent.""" |
|
1470 | """Hopefully pretty robust repr equivalent.""" | |
1474 | # this is pretty horrible but should always return *something* |
|
1471 | # this is pretty horrible but should always return *something* | |
1475 | try: |
|
1472 | try: | |
1476 | return pydoc.text.repr(value) |
|
1473 | return pydoc.text.repr(value) | |
1477 | except KeyboardInterrupt: |
|
1474 | except KeyboardInterrupt: | |
1478 | raise |
|
1475 | raise | |
1479 | except: |
|
1476 | except: | |
1480 | try: |
|
1477 | try: | |
1481 | return repr(value) |
|
1478 | return repr(value) | |
1482 | except KeyboardInterrupt: |
|
1479 | except KeyboardInterrupt: | |
1483 | raise |
|
1480 | raise | |
1484 | except: |
|
1481 | except: | |
1485 | try: |
|
1482 | try: | |
1486 | # all still in an except block so we catch |
|
1483 | # all still in an except block so we catch | |
1487 | # getattr raising |
|
1484 | # getattr raising | |
1488 | name = getattr(value, '__name__', None) |
|
1485 | name = getattr(value, '__name__', None) | |
1489 | if name: |
|
1486 | if name: | |
1490 | # ick, recursion |
|
1487 | # ick, recursion | |
1491 | return text_repr(name) |
|
1488 | return text_repr(name) | |
1492 | klass = getattr(value, '__class__', None) |
|
1489 | klass = getattr(value, '__class__', None) | |
1493 | if klass: |
|
1490 | if klass: | |
1494 | return '%s instance' % text_repr(klass) |
|
1491 | return '%s instance' % text_repr(klass) | |
1495 | except KeyboardInterrupt: |
|
1492 | except KeyboardInterrupt: | |
1496 | raise |
|
1493 | raise | |
1497 | except: |
|
1494 | except: | |
1498 | return 'UNRECOVERABLE REPR FAILURE' |
|
1495 | return 'UNRECOVERABLE REPR FAILURE' | |
1499 |
|
1496 | |||
1500 |
|
1497 | |||
1498 | def eqrepr(value, repr=text_repr):
1499 |     return '=%s' % repr(value)
1500 |
1501 |
1502 | def nullrepr(value, repr=text_repr):
1503 |     return ''
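The point of `text_repr` above is that a broken `__repr__` never escapes while a traceback is being rendered; a hedged sketch::

    class BadRepr:
        def __repr__(self):
            raise RuntimeError("repr exploded")

    # text_repr does not propagate the error: it falls back through the
    # pydoc/repr machinery and, at worst, returns the
    # 'UNRECOVERABLE REPR FAILURE' marker defined above.
    print(text_repr(BadRepr()))
    print(text_repr(12345))        # well-behaved objects come out as usual: '12345'
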
@@ -1,347 +1,341 @@
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | """ |
|
2 | """ | |
3 | Provides a reload() function that acts recursively. |
|
3 | Provides a reload() function that acts recursively. | |
4 |
|
4 | |||
5 | Python's normal :func:`python:reload` function only reloads the module that it's |
|
5 | Python's normal :func:`python:reload` function only reloads the module that it's | |
6 | passed. The :func:`reload` function in this module also reloads everything |
|
6 | passed. The :func:`reload` function in this module also reloads everything | |
7 | imported from that module, which is useful when you're changing files deep |
|
7 | imported from that module, which is useful when you're changing files deep | |
8 | inside a package. |
|
8 | inside a package. | |
9 |
|
9 | |||
10 |
To use this as your default reload function, type this |
|
10 | To use this as your default reload function, type this:: | |
11 |
|
||||
12 | import __builtin__ |
|
|||
13 | from IPython.lib import deepreload |
|
|||
14 | __builtin__.reload = deepreload.reload |
|
|||
15 |
|
||||
16 | Or this for Python 3:: |
|
|||
17 |
|
11 | |||
18 | import builtins |
|
12 | import builtins | |
19 | from IPython.lib import deepreload |
|
13 | from IPython.lib import deepreload | |
20 | builtins.reload = deepreload.reload |
|
14 | builtins.reload = deepreload.reload | |
21 |
|
15 | |||
22 | A reference to the original :func:`python:reload` is stored in this module as |
|
16 | A reference to the original :func:`python:reload` is stored in this module as | |
23 | :data:`original_reload`, so you can restore it later. |
|
17 | :data:`original_reload`, so you can restore it later. | |
24 |
|
18 | |||
25 | This code is almost entirely based on knee.py, which is a Python |
|
19 | This code is almost entirely based on knee.py, which is a Python | |
26 | re-implementation of hierarchical module import. |
|
20 | re-implementation of hierarchical module import. | |
27 | """ |
|
21 | """ | |
28 | #***************************************************************************** |
|
22 | #***************************************************************************** | |
29 | # Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu> |
|
23 | # Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu> | |
30 | # |
|
24 | # | |
31 | # Distributed under the terms of the BSD License. The full license is in |
|
25 | # Distributed under the terms of the BSD License. The full license is in | |
32 | # the file COPYING, distributed as part of this software. |
|
26 | # the file COPYING, distributed as part of this software. | |
33 | #***************************************************************************** |
|
27 | #***************************************************************************** | |
34 |
|
28 | |||
35 | import builtins as builtin_mod |
|
29 | import builtins as builtin_mod | |
36 | from contextlib import contextmanager |
|
30 | from contextlib import contextmanager | |
37 | import imp |
|
31 | import imp | |
38 | import sys |
|
32 | import sys | |
39 |
|
33 | |||
40 | from types import ModuleType |
|
34 | from types import ModuleType | |
41 | from warnings import warn |
|
35 | from warnings import warn | |
42 | import types |
|
36 | import types | |
43 |
|
37 | |||
44 | original_import = builtin_mod.__import__ |
|
38 | original_import = builtin_mod.__import__ | |
45 |
|
39 | |||
46 | @contextmanager |
|
40 | @contextmanager | |
47 | def replace_import_hook(new_import): |
|
41 | def replace_import_hook(new_import): | |
48 | saved_import = builtin_mod.__import__ |
|
42 | saved_import = builtin_mod.__import__ | |
49 | builtin_mod.__import__ = new_import |
|
43 | builtin_mod.__import__ = new_import | |
50 | try: |
|
44 | try: | |
51 | yield |
|
45 | yield | |
52 | finally: |
|
46 | finally: | |
53 | builtin_mod.__import__ = saved_import |
|
47 | builtin_mod.__import__ = saved_import | |
54 |
|
48 | |||
55 | def get_parent(globals, level): |
|
49 | def get_parent(globals, level): | |
56 | """ |
|
50 | """ | |
57 | parent, name = get_parent(globals, level) |
|
51 | parent, name = get_parent(globals, level) | |
58 |
|
52 | |||
59 | Return the package that an import is being performed in. If globals comes |
|
53 | Return the package that an import is being performed in. If globals comes | |
60 | from the module foo.bar.bat (not itself a package), this returns the |
|
54 | from the module foo.bar.bat (not itself a package), this returns the | |
61 | sys.modules entry for foo.bar. If globals is from a package's __init__.py, |
|
55 | sys.modules entry for foo.bar. If globals is from a package's __init__.py, | |
62 | the package's entry in sys.modules is returned. |
|
56 | the package's entry in sys.modules is returned. | |
63 |
|
57 | |||
64 | If globals doesn't come from a package or a module in a package, or a |
|
58 | If globals doesn't come from a package or a module in a package, or a | |
65 | corresponding entry is not found in sys.modules, None is returned. |
|
59 | corresponding entry is not found in sys.modules, None is returned. | |
66 | """ |
|
60 | """ | |
67 | orig_level = level |
|
61 | orig_level = level | |
68 |
|
62 | |||
69 | if not level or not isinstance(globals, dict): |
|
63 | if not level or not isinstance(globals, dict): | |
70 | return None, '' |
|
64 | return None, '' | |
71 |
|
65 | |||
72 | pkgname = globals.get('__package__', None) |
|
66 | pkgname = globals.get('__package__', None) | |
73 |
|
67 | |||
74 | if pkgname is not None: |
|
68 | if pkgname is not None: | |
75 | # __package__ is set, so use it |
|
69 | # __package__ is set, so use it | |
76 | if not hasattr(pkgname, 'rindex'): |
|
70 | if not hasattr(pkgname, 'rindex'): | |
77 | raise ValueError('__package__ set to non-string') |
|
71 | raise ValueError('__package__ set to non-string') | |
78 | if len(pkgname) == 0: |
|
72 | if len(pkgname) == 0: | |
79 | if level > 0: |
|
73 | if level > 0: | |
80 | raise ValueError('Attempted relative import in non-package') |
|
74 | raise ValueError('Attempted relative import in non-package') | |
81 | return None, '' |
|
75 | return None, '' | |
82 | name = pkgname |
|
76 | name = pkgname | |
83 | else: |
|
77 | else: | |
84 | # __package__ not set, so figure it out and set it |
|
78 | # __package__ not set, so figure it out and set it | |
85 | if '__name__' not in globals: |
|
79 | if '__name__' not in globals: | |
86 | return None, '' |
|
80 | return None, '' | |
87 | modname = globals['__name__'] |
|
81 | modname = globals['__name__'] | |
88 |
|
82 | |||
89 | if '__path__' in globals: |
|
83 | if '__path__' in globals: | |
90 | # __path__ is set, so modname is already the package name |
|
84 | # __path__ is set, so modname is already the package name | |
91 | globals['__package__'] = name = modname |
|
85 | globals['__package__'] = name = modname | |
92 | else: |
|
86 | else: | |
93 | # Normal module, so work out the package name if any |
|
87 | # Normal module, so work out the package name if any | |
94 | lastdot = modname.rfind('.') |
|
88 | lastdot = modname.rfind('.') | |
95 | if lastdot < 0 < level: |
|
89 | if lastdot < 0 < level: | |
96 | raise ValueError("Attempted relative import in non-package") |
|
90 | raise ValueError("Attempted relative import in non-package") | |
97 | if lastdot < 0: |
|
91 | if lastdot < 0: | |
98 | globals['__package__'] = None |
|
92 | globals['__package__'] = None | |
99 | return None, '' |
|
93 | return None, '' | |
100 | globals['__package__'] = name = modname[:lastdot] |
|
94 | globals['__package__'] = name = modname[:lastdot] | |
101 |
|
95 | |||
102 | dot = len(name) |
|
96 | dot = len(name) | |
103 | for x in range(level, 1, -1): |
|
97 | for x in range(level, 1, -1): | |
104 | try: |
|
98 | try: | |
105 | dot = name.rindex('.', 0, dot) |
|
99 | dot = name.rindex('.', 0, dot) | |
106 | except ValueError: |
|
100 | except ValueError: | |
107 | raise ValueError("attempted relative import beyond top-level " |
|
101 | raise ValueError("attempted relative import beyond top-level " | |
108 | "package") |
|
102 | "package") | |
109 | name = name[:dot] |
|
103 | name = name[:dot] | |
110 |
|
104 | |||
111 | try: |
|
105 | try: | |
112 | parent = sys.modules[name] |
|
106 | parent = sys.modules[name] | |
113 | except: |
|
107 | except: | |
114 | if orig_level < 1: |
|
108 | if orig_level < 1: | |
115 | warn("Parent module '%.200s' not found while handling absolute " |
|
109 | warn("Parent module '%.200s' not found while handling absolute " | |
116 | "import" % name) |
|
110 | "import" % name) | |
117 | parent = None |
|
111 | parent = None | |
118 | else: |
|
112 | else: | |
119 | raise SystemError("Parent module '%.200s' not loaded, cannot " |
|
113 | raise SystemError("Parent module '%.200s' not loaded, cannot " | |
120 | "perform relative import" % name) |
|
114 | "perform relative import" % name) | |
121 |
|
115 | |||
122 | # We expect, but can't guarantee, if parent != None, that: |
|
116 | # We expect, but can't guarantee, if parent != None, that: | |
123 | # - parent.__name__ == name |
|
117 | # - parent.__name__ == name | |
124 | # - parent.__dict__ is globals |
|
118 | # - parent.__dict__ is globals | |
125 | # If this is violated... Who cares? |
|
119 | # If this is violated... Who cares? | |
126 | return parent, name |
|
120 | return parent, name | |
127 |
|
121 | |||
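A hedged, concrete reading of the docstring above: for a level-1 relative import performed from `IPython.utils.path`, `get_parent` returns the already-imported `IPython.utils` package and its dotted name. The `fake_globals` dict below is purely illustrative::

    import sys
    import IPython.utils   # make sure the parent package is in sys.modules

    fake_globals = {'__name__': 'IPython.utils.path',
                    '__package__': 'IPython.utils'}
    parent, name = get_parent(fake_globals, 1)
    print(name)                                    # 'IPython.utils'
    print(parent is sys.modules['IPython.utils'])  # True
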
128 | def load_next(mod, altmod, name, buf): |
|
122 | def load_next(mod, altmod, name, buf): | |
129 | """ |
|
123 | """ | |
130 | mod, name, buf = load_next(mod, altmod, name, buf) |
|
124 | mod, name, buf = load_next(mod, altmod, name, buf) | |
131 |
|
125 | |||
132 | altmod is either None or same as mod |
|
126 | altmod is either None or same as mod | |
133 | """ |
|
127 | """ | |
134 |
|
128 | |||
135 | if len(name) == 0: |
|
129 | if len(name) == 0: | |
136 | # completely empty module name should only happen in |
|
130 | # completely empty module name should only happen in | |
137 | # 'from . import' (or '__import__("")') |
|
131 | # 'from . import' (or '__import__("")') | |
138 | return mod, None, buf |
|
132 | return mod, None, buf | |
139 |
|
133 | |||
140 | dot = name.find('.') |
|
134 | dot = name.find('.') | |
141 | if dot == 0: |
|
135 | if dot == 0: | |
142 | raise ValueError('Empty module name') |
|
136 | raise ValueError('Empty module name') | |
143 |
|
137 | |||
144 | if dot < 0: |
|
138 | if dot < 0: | |
145 | subname = name |
|
139 | subname = name | |
146 | next = None |
|
140 | next = None | |
147 | else: |
|
141 | else: | |
148 | subname = name[:dot] |
|
142 | subname = name[:dot] | |
149 | next = name[dot+1:] |
|
143 | next = name[dot+1:] | |
150 |
|
144 | |||
151 | if buf != '': |
|
145 | if buf != '': | |
152 | buf += '.' |
|
146 | buf += '.' | |
153 | buf += subname |
|
147 | buf += subname | |
154 |
|
148 | |||
155 | result = import_submodule(mod, subname, buf) |
|
149 | result = import_submodule(mod, subname, buf) | |
156 | if result is None and mod != altmod: |
|
150 | if result is None and mod != altmod: | |
157 | result = import_submodule(altmod, subname, subname) |
|
151 | result = import_submodule(altmod, subname, subname) | |
158 | if result is not None: |
|
152 | if result is not None: | |
159 | buf = subname |
|
153 | buf = subname | |
160 |
|
154 | |||
161 | if result is None: |
|
155 | if result is None: | |
162 | raise ImportError("No module named %.200s" % name) |
|
156 | raise ImportError("No module named %.200s" % name) | |
163 |
|
157 | |||
164 | return result, next, buf |
|
158 | return result, next, buf | |
165 |
|
159 | |||
166 |
|
160 | |||
167 | # Need to keep track of what we've already reloaded to prevent cyclic evil |
|
161 | # Need to keep track of what we've already reloaded to prevent cyclic evil | |
168 | found_now = {} |
|
162 | found_now = {} | |
169 |
|
163 | |||
170 | def import_submodule(mod, subname, fullname): |
|
164 | def import_submodule(mod, subname, fullname): | |
171 | """m = import_submodule(mod, subname, fullname)""" |
|
165 | """m = import_submodule(mod, subname, fullname)""" | |
172 | # Require: |
|
166 | # Require: | |
173 | # if mod == None: subname == fullname |
|
167 | # if mod == None: subname == fullname | |
174 | # else: mod.__name__ + "." + subname == fullname |
|
168 | # else: mod.__name__ + "." + subname == fullname | |
175 |
|
169 | |||
176 | global found_now |
|
170 | global found_now | |
177 | if fullname in found_now and fullname in sys.modules: |
|
171 | if fullname in found_now and fullname in sys.modules: | |
178 | m = sys.modules[fullname] |
|
172 | m = sys.modules[fullname] | |
179 | else: |
|
173 | else: | |
180 | print('Reloading', fullname) |
|
174 | print('Reloading', fullname) | |
181 | found_now[fullname] = 1 |
|
175 | found_now[fullname] = 1 | |
182 | oldm = sys.modules.get(fullname, None) |
|
176 | oldm = sys.modules.get(fullname, None) | |
183 |
|
177 | |||
184 | if mod is None: |
|
178 | if mod is None: | |
185 | path = None |
|
179 | path = None | |
186 | elif hasattr(mod, '__path__'): |
|
180 | elif hasattr(mod, '__path__'): | |
187 | path = mod.__path__ |
|
181 | path = mod.__path__ | |
188 | else: |
|
182 | else: | |
189 | return None |
|
183 | return None | |
190 |
|
184 | |||
191 | try: |
|
185 | try: | |
192 | # This appears to be necessary on Python 3, because imp.find_module() |
|
186 | # This appears to be necessary on Python 3, because imp.find_module() | |
193 | # tries to import standard libraries (like io) itself, and we don't |
|
187 | # tries to import standard libraries (like io) itself, and we don't | |
194 | # want them to be processed by our deep_import_hook. |
|
188 | # want them to be processed by our deep_import_hook. | |
195 | with replace_import_hook(original_import): |
|
189 | with replace_import_hook(original_import): | |
196 | fp, filename, stuff = imp.find_module(subname, path) |
|
190 | fp, filename, stuff = imp.find_module(subname, path) | |
197 | except ImportError: |
|
191 | except ImportError: | |
198 | return None |
|
192 | return None | |
199 |
|
193 | |||
200 | try: |
|
194 | try: | |
201 | m = imp.load_module(fullname, fp, filename, stuff) |
|
195 | m = imp.load_module(fullname, fp, filename, stuff) | |
202 | except: |
|
196 | except: | |
203 | # load_module probably removed name from modules because of |
|
197 | # load_module probably removed name from modules because of | |
204 | # the error. Put back the original module object. |
|
198 | # the error. Put back the original module object. | |
205 | if oldm: |
|
199 | if oldm: | |
206 | sys.modules[fullname] = oldm |
|
200 | sys.modules[fullname] = oldm | |
207 | raise |
|
201 | raise | |
208 | finally: |
|
202 | finally: | |
209 | if fp: fp.close() |
|
203 | if fp: fp.close() | |
210 |
|
204 | |||
211 | add_submodule(mod, m, fullname, subname) |
|
205 | add_submodule(mod, m, fullname, subname) | |
212 |
|
206 | |||
213 | return m |
|
207 | return m | |
214 |
|
208 | |||
215 | def add_submodule(mod, submod, fullname, subname): |
|
209 | def add_submodule(mod, submod, fullname, subname): | |
216 | """mod.{subname} = submod""" |
|
210 | """mod.{subname} = submod""" | |
217 | if mod is None: |
|
211 | if mod is None: | |
218 | return #Nothing to do here. |
|
212 | return #Nothing to do here. | |
219 |
|
213 | |||
220 | if submod is None: |
|
214 | if submod is None: | |
221 | submod = sys.modules[fullname] |
|
215 | submod = sys.modules[fullname] | |
222 |
|
216 | |||
223 | setattr(mod, subname, submod) |
|
217 | setattr(mod, subname, submod) | |
224 |
|
218 | |||
225 | return |
|
219 | return | |
226 |
|
220 | |||
227 | def ensure_fromlist(mod, fromlist, buf, recursive): |
|
221 | def ensure_fromlist(mod, fromlist, buf, recursive): | |
228 | """Handle 'from module import a, b, c' imports.""" |
|
222 | """Handle 'from module import a, b, c' imports.""" | |
229 | if not hasattr(mod, '__path__'): |
|
223 | if not hasattr(mod, '__path__'): | |
230 | return |
|
224 | return | |
231 | for item in fromlist: |
|
225 | for item in fromlist: | |
232 | if not hasattr(item, 'rindex'): |
|
226 | if not hasattr(item, 'rindex'): | |
233 | raise TypeError("Item in ``from list'' not a string") |
|
227 | raise TypeError("Item in ``from list'' not a string") | |
234 | if item == '*': |
|
228 | if item == '*': | |
235 | if recursive: |
|
229 | if recursive: | |
236 | continue # avoid endless recursion |
|
230 | continue # avoid endless recursion | |
237 | try: |
|
231 | try: | |
238 | all = mod.__all__ |
|
232 | all = mod.__all__ | |
239 | except AttributeError: |
|
233 | except AttributeError: | |
240 | pass |
|
234 | pass | |
241 | else: |
|
235 | else: | |
242 | ret = ensure_fromlist(mod, all, buf, 1) |
|
236 | ret = ensure_fromlist(mod, all, buf, 1) | |
243 | if not ret: |
|
237 | if not ret: | |
244 | return 0 |
|
238 | return 0 | |
245 | elif not hasattr(mod, item): |
|
239 | elif not hasattr(mod, item): | |
246 | import_submodule(mod, item, buf + '.' + item) |
|
240 | import_submodule(mod, item, buf + '.' + item) | |
247 |
|
241 | |||
248 | def deep_import_hook(name, globals=None, locals=None, fromlist=None, level=-1): |
|
242 | def deep_import_hook(name, globals=None, locals=None, fromlist=None, level=-1): | |
249 | """Replacement for __import__()""" |
|
243 | """Replacement for __import__()""" | |
250 | parent, buf = get_parent(globals, level) |
|
244 | parent, buf = get_parent(globals, level) | |
251 |
|
245 | |||
252 | head, name, buf = load_next(parent, None if level < 0 else parent, name, buf) |
|
246 | head, name, buf = load_next(parent, None if level < 0 else parent, name, buf) | |
253 |
|
247 | |||
254 | tail = head |
|
248 | tail = head | |
255 | while name: |
|
249 | while name: | |
256 | tail, name, buf = load_next(tail, tail, name, buf) |
|
250 | tail, name, buf = load_next(tail, tail, name, buf) | |
257 |
|
251 | |||
258 | # If tail is None, both get_parent and load_next found |
|
252 | # If tail is None, both get_parent and load_next found | |
259 | # an empty module name: someone called __import__("") or |
|
253 | # an empty module name: someone called __import__("") or | |
260 | # doctored faulty bytecode |
|
254 | # doctored faulty bytecode | |
261 | if tail is None: |
|
255 | if tail is None: | |
262 | raise ValueError('Empty module name') |
|
256 | raise ValueError('Empty module name') | |
263 |
|
257 | |||
264 | if not fromlist: |
|
258 | if not fromlist: | |
265 | return head |
|
259 | return head | |
266 |
|
260 | |||
267 | ensure_fromlist(tail, fromlist, buf, 0) |
|
261 | ensure_fromlist(tail, fromlist, buf, 0) | |
268 | return tail |
|
262 | return tail | |
269 |
|
263 | |||
270 | modules_reloading = {} |
|
264 | modules_reloading = {} | |
271 |
|
265 | |||
272 | def deep_reload_hook(m): |
|
266 | def deep_reload_hook(m): | |
273 | """Replacement for reload().""" |
|
267 | """Replacement for reload().""" | |
274 | # Hardcode this one as it would raise a NotImplementedError from the |
|
268 | # Hardcode this one as it would raise a NotImplementedError from the | |
275 | # bowels of Python and screw up the import machinery after. |
|
269 | # bowels of Python and screw up the import machinery after. | |
276 | # unlike other imports the `exclude` list already in place is not enough. |
|
270 | # unlike other imports the `exclude` list already in place is not enough. | |
277 |
|
271 | |||
278 | if m is types: |
|
272 | if m is types: | |
279 | return m |
|
273 | return m | |
280 | if not isinstance(m, ModuleType): |
|
274 | if not isinstance(m, ModuleType): | |
281 | raise TypeError("reload() argument must be module") |
|
275 | raise TypeError("reload() argument must be module") | |
282 |
|
276 | |||
283 | name = m.__name__ |
|
277 | name = m.__name__ | |
284 |
|
278 | |||
285 | if name not in sys.modules: |
|
279 | if name not in sys.modules: | |
286 | raise ImportError("reload(): module %.200s not in sys.modules" % name) |
|
280 | raise ImportError("reload(): module %.200s not in sys.modules" % name) | |
287 |
|
281 | |||
288 | global modules_reloading |
|
282 | global modules_reloading | |
289 | try: |
|
283 | try: | |
290 | return modules_reloading[name] |
|
284 | return modules_reloading[name] | |
291 | except: |
|
285 | except: | |
292 | modules_reloading[name] = m |
|
286 | modules_reloading[name] = m | |
293 |
|
287 | |||
294 | dot = name.rfind('.') |
|
288 | dot = name.rfind('.') | |
295 | if dot < 0: |
|
289 | if dot < 0: | |
296 | subname = name |
|
290 | subname = name | |
297 | path = None |
|
291 | path = None | |
298 | else: |
|
292 | else: | |
299 | try: |
|
293 | try: | |
300 | parent = sys.modules[name[:dot]] |
|
294 | parent = sys.modules[name[:dot]] | |
301 | except KeyError: |
|
295 | except KeyError: | |
302 | modules_reloading.clear() |
|
296 | modules_reloading.clear() | |
303 | raise ImportError("reload(): parent %.200s not in sys.modules" % name[:dot]) |
|
297 | raise ImportError("reload(): parent %.200s not in sys.modules" % name[:dot]) | |
304 | subname = name[dot+1:] |
|
298 | subname = name[dot+1:] | |
305 | path = getattr(parent, "__path__", None) |
|
299 | path = getattr(parent, "__path__", None) | |
306 |
|
300 | |||
307 | try: |
|
301 | try: | |
308 | # This appears to be necessary on Python 3, because imp.find_module() |
|
302 | # This appears to be necessary on Python 3, because imp.find_module() | |
309 | # tries to import standard libraries (like io) itself, and we don't |
|
303 | # tries to import standard libraries (like io) itself, and we don't | |
310 | # want them to be processed by our deep_import_hook. |
|
304 | # want them to be processed by our deep_import_hook. | |
311 | with replace_import_hook(original_import): |
|
305 | with replace_import_hook(original_import): | |
312 | fp, filename, stuff = imp.find_module(subname, path) |
|
306 | fp, filename, stuff = imp.find_module(subname, path) | |
313 | finally: |
|
307 | finally: | |
314 | modules_reloading.clear() |
|
308 | modules_reloading.clear() | |
315 |
|
309 | |||
316 | try: |
|
310 | try: | |
317 | newm = imp.load_module(name, fp, filename, stuff) |
|
311 | newm = imp.load_module(name, fp, filename, stuff) | |
318 | except: |
|
312 | except: | |
319 | # load_module probably removed name from modules because of |
|
313 | # load_module probably removed name from modules because of | |
320 | # the error. Put back the original module object. |
|
314 | # the error. Put back the original module object. | |
321 | sys.modules[name] = m |
|
315 | sys.modules[name] = m | |
322 | raise |
|
316 | raise | |
323 | finally: |
|
317 | finally: | |
324 | if fp: fp.close() |
|
318 | if fp: fp.close() | |
325 |
|
319 | |||
326 | modules_reloading.clear() |
|
320 | modules_reloading.clear() | |
327 | return newm |
|
321 | return newm | |
328 |
|
322 | |||
329 | # Save the original hooks |
|
323 | # Save the original hooks | |
330 | original_reload = imp.reload |
|
324 | original_reload = imp.reload | |
331 |
|
325 | |||
332 | # Replacement for reload() |
|
326 | # Replacement for reload() | |
327 | def reload(module, exclude=('sys', 'os.path', 'builtins', '__main__',
328 |                             'numpy', 'numpy._globals')):
329 |     """Recursively reload all modules used in the given module.  Optionally
330 |     takes a list of modules to exclude from reloading.  The default exclude
331 |     list contains sys, os.path, builtins, __main__, numpy and numpy._globals,
332 |     to prevent, e.g., resetting display, exception, and io hooks.
333 |     """
334 |     global found_now
335 |     for i in exclude:
336 |         found_now[i] = 1
337 |     try:
338 |         with replace_import_hook(deep_import_hook):
339 |             return deep_reload_hook(module)
340 |     finally:
341 |         found_now = {}
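A hedged usage sketch of the entry point above; `mypackage` is a placeholder and `matplotlib` merely illustrates extending the default exclude list::

    from IPython.lib import deepreload
    import mypackage                       # placeholder for your own package

    # Reload mypackage and everything it imports, but leave the listed
    # modules (the defaults plus matplotlib here) untouched.
    deepreload.reload(mypackage,
                      exclude=('sys', 'os.path', 'builtins', '__main__',
                               'numpy', 'numpy._globals', 'matplotlib'))
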
@@ -1,532 +1,532 @@
1 | # -*- coding: utf-8 -*-
2 | """
3 | Defines a variety of Pygments lexers for highlighting IPython code.
4 |
5 | This includes:
6 |
7 | IPythonLexer, IPython3Lexer
8 |     Lexers for pure IPython (python + magic/shell commands)
9 |
10 | IPythonPartialTracebackLexer, IPythonTracebackLexer
11 |     Supports 2.x and 3.x via keyword `python3`.  The partial traceback
12 |     lexer reads everything but the Python code appearing in a traceback.
13 |     The full lexer combines the partial lexer with an IPython lexer.
14 |
15 | IPythonConsoleLexer
16 |     A lexer for IPython console sessions, with support for tracebacks.
17 |
18 | IPyLexer
19 |     A friendly lexer which examines the first line of text and from it,
20 |     decides whether to use an IPython lexer or an IPython console lexer.
21 |     This is probably the only lexer that needs to be explicitly added
22 |     to Pygments.
23 |
24 | """
25 | #----------------------------------------------------------------------------- |
|
25 | #----------------------------------------------------------------------------- | |
26 | # Copyright (c) 2013, the IPython Development Team. |
|
26 | # Copyright (c) 2013, the IPython Development Team. | |
27 | # |
|
27 | # | |
28 | # Distributed under the terms of the Modified BSD License. |
|
28 | # Distributed under the terms of the Modified BSD License. | |
29 | # |
|
29 | # | |
30 | # The full license is in the file COPYING.txt, distributed with this software. |
|
30 | # The full license is in the file COPYING.txt, distributed with this software. | |
31 | #----------------------------------------------------------------------------- |
|
31 | #----------------------------------------------------------------------------- | |
32 |
|
32 | |||
33 | # Standard library |
|
33 | # Standard library | |
34 | import re |
|
34 | import re | |
35 |
|
35 | |||
36 | # Third party |
|
36 | # Third party | |
37 | from pygments.lexers import ( |
|
37 | from pygments.lexers import ( | |
38 | BashLexer, HtmlLexer, JavascriptLexer, RubyLexer, PerlLexer, PythonLexer, |
|
38 | BashLexer, HtmlLexer, JavascriptLexer, RubyLexer, PerlLexer, PythonLexer, | |
39 | Python3Lexer, TexLexer) |
|
39 | Python3Lexer, TexLexer) | |
40 | from pygments.lexer import ( |
|
40 | from pygments.lexer import ( | |
41 | Lexer, DelegatingLexer, RegexLexer, do_insertions, bygroups, using, |
|
41 | Lexer, DelegatingLexer, RegexLexer, do_insertions, bygroups, using, | |
42 | ) |
|
42 | ) | |
43 | from pygments.token import ( |
|
43 | from pygments.token import ( | |
44 | Generic, Keyword, Literal, Name, Operator, Other, Text, Error, |
|
44 | Generic, Keyword, Literal, Name, Operator, Other, Text, Error, | |
45 | ) |
|
45 | ) | |
46 | from pygments.util import get_bool_opt |
|
46 | from pygments.util import get_bool_opt | |
47 |
|
47 | |||
48 | # Local |
|
48 | # Local | |
49 |
|
49 | |||
50 | line_re = re.compile('.*?\n') |
|
50 | line_re = re.compile('.*?\n') | |
51 |
|
51 | |||
52 | __all__ = ['build_ipy_lexer', 'IPython3Lexer', 'IPythonLexer', |
|
52 | __all__ = ['build_ipy_lexer', 'IPython3Lexer', 'IPythonLexer', | |
53 | 'IPythonPartialTracebackLexer', 'IPythonTracebackLexer', |
|
53 | 'IPythonPartialTracebackLexer', 'IPythonTracebackLexer', | |
54 | 'IPythonConsoleLexer', 'IPyLexer'] |
|
54 | 'IPythonConsoleLexer', 'IPyLexer'] | |
55 |
|
55 | |||
56 |
|
56 | |||
57 | def build_ipy_lexer(python3): |
|
57 | def build_ipy_lexer(python3): | |
58 | """Builds IPython lexers depending on the value of `python3`. |
|
58 | """Builds IPython lexers depending on the value of `python3`. | |
59 |
|
59 | |||
60 | The lexer inherits from an appropriate Python lexer and then adds |
|
60 | The lexer inherits from an appropriate Python lexer and then adds | |
61 | information about IPython specific keywords (i.e. magic commands, |
|
61 | information about IPython specific keywords (i.e. magic commands, | |
62 | shell commands, etc.) |
|
62 | shell commands, etc.) | |
63 |
|
63 | |||
64 | Parameters |
|
64 | Parameters | |
65 | ---------- |
|
65 | ---------- | |
66 | python3 : bool |
|
66 | python3 : bool | |
67 | If `True`, then build an IPython lexer from a Python 3 lexer. |
|
67 | If `True`, then build an IPython lexer from a Python 3 lexer. | |
68 |
|
68 | |||
69 | """ |
|
69 | """ | |
70 | # It would be nice to have a single IPython lexer class which takes |
|
70 | # It would be nice to have a single IPython lexer class which takes | |
71 | # a boolean `python3`. But since there are two Python lexer classes, |
|
71 | # a boolean `python3`. But since there are two Python lexer classes, | |
72 | # we will also have two IPython lexer classes. |
|
72 | # we will also have two IPython lexer classes. | |
73 | if python3: |
|
73 | if python3: | |
74 | PyLexer = Python3Lexer |
|
74 | PyLexer = Python3Lexer | |
75 | name = 'IPython3' |
|
75 | name = 'IPython3' | |
76 | aliases = ['ipython3'] |
|
76 | aliases = ['ipython3'] | |
77 | doc = """IPython3 Lexer""" |
|
77 | doc = """IPython3 Lexer""" | |
78 | else: |
|
78 | else: | |
79 | PyLexer = PythonLexer |
|
79 | PyLexer = PythonLexer | |
80 | name = 'IPython' |
|
80 | name = 'IPython' | |
81 | aliases = ['ipython2', 'ipython'] |
|
81 | aliases = ['ipython2', 'ipython'] | |
82 | doc = """IPython Lexer""" |
|
82 | doc = """IPython Lexer""" | |
83 |
|
83 | |||
84 | ipython_tokens = [ |
|
84 | ipython_tokens = [ | |
85 | (r'(?s)(\s*)(%%capture)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
85 | (r'(?s)(\s*)(%%capture)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
86 | (r'(?s)(\s*)(%%debug)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
86 | (r'(?s)(\s*)(%%debug)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
87 | (r'(?is)(\s*)(%%html)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(HtmlLexer))), |
|
87 | (r'(?is)(\s*)(%%html)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(HtmlLexer))), | |
88 | (r'(?s)(\s*)(%%javascript)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(JavascriptLexer))), |
|
88 | (r'(?s)(\s*)(%%javascript)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(JavascriptLexer))), | |
89 | (r'(?s)(\s*)(%%js)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(JavascriptLexer))), |
|
89 | (r'(?s)(\s*)(%%js)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(JavascriptLexer))), | |
90 | (r'(?s)(\s*)(%%latex)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(TexLexer))), |
|
90 | (r'(?s)(\s*)(%%latex)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(TexLexer))), | |
91 | (r'(?s)(\s*)(%%perl)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PerlLexer))), |
|
91 | (r'(?s)(\s*)(%%perl)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PerlLexer))), | |
92 | (r'(?s)(\s*)(%%prun)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
92 | (r'(?s)(\s*)(%%prun)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
93 | (r'(?s)(\s*)(%%pypy)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
93 | (r'(?s)(\s*)(%%pypy)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
94 | (r'(?s)(\s*)(%%python)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
94 | (r'(?s)(\s*)(%%python)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
95 | (r'(?s)(\s*)(%%python2)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PythonLexer))), |
|
95 | (r'(?s)(\s*)(%%python2)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PythonLexer))), | |
96 | (r'(?s)(\s*)(%%python3)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(Python3Lexer))), |
|
96 | (r'(?s)(\s*)(%%python3)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(Python3Lexer))), | |
97 | (r'(?s)(\s*)(%%ruby)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(RubyLexer))), |
|
97 | (r'(?s)(\s*)(%%ruby)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(RubyLexer))), | |
98 | (r'(?s)(\s*)(%%time)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
98 | (r'(?s)(\s*)(%%time)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
99 | (r'(?s)(\s*)(%%timeit)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
99 | (r'(?s)(\s*)(%%timeit)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
100 | (r'(?s)(\s*)(%%writefile)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
100 | (r'(?s)(\s*)(%%writefile)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
101 | (r'(?s)(\s*)(%%file)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
101 | (r'(?s)(\s*)(%%file)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
102 | (r"(?s)(\s*)(%%)(\w+)(.*)", bygroups(Text, Operator, Keyword, Text)), |
|
102 | (r"(?s)(\s*)(%%)(\w+)(.*)", bygroups(Text, Operator, Keyword, Text)), | |
103 | (r'(?s)(^\s*)(%%!)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(BashLexer))), |
|
103 | (r'(?s)(^\s*)(%%!)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(BashLexer))), | |
104 | (r"(%%?)(\w+)(\?\??)$", bygroups(Operator, Keyword, Operator)), |
|
104 | (r"(%%?)(\w+)(\?\??)$", bygroups(Operator, Keyword, Operator)), | |
105 | (r"\b(\?\??)(\s*)$", bygroups(Operator, Text)), |
|
105 | (r"\b(\?\??)(\s*)$", bygroups(Operator, Text)), | |
106 | (r'(%)(sx|sc|system)(.*)(\n)', bygroups(Operator, Keyword, |
|
106 | (r'(%)(sx|sc|system)(.*)(\n)', bygroups(Operator, Keyword, | |
107 | using(BashLexer), Text)), |
|
107 | using(BashLexer), Text)), | |
108 | (r'(%)(\w+)(.*\n)', bygroups(Operator, Keyword, Text)), |
|
108 | (r'(%)(\w+)(.*\n)', bygroups(Operator, Keyword, Text)), | |
109 | (r'^(!!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)), |
|
109 | (r'^(!!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)), | |
110 | (r'(!)(?!=)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)), |
|
110 | (r'(!)(?!=)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)), | |
111 | (r'^(\s*)(\?\??)(\s*%{0,2}[\w\.\*]*)', bygroups(Text, Operator, Text)), |
|
111 | (r'^(\s*)(\?\??)(\s*%{0,2}[\w\.\*]*)', bygroups(Text, Operator, Text)), | |
112 | (r'(\s*%{0,2}[\w\.\*]*)(\?\??)(\s*)$', bygroups(Text, Operator, Text)), |
|
112 | (r'(\s*%{0,2}[\w\.\*]*)(\?\??)(\s*)$', bygroups(Text, Operator, Text)), | |
113 | ] |
|
113 | ] | |
114 |
|
114 | |||
115 | tokens = PyLexer.tokens.copy() |
|
115 | tokens = PyLexer.tokens.copy() | |
116 | tokens['root'] = ipython_tokens + tokens['root'] |
|
116 | tokens['root'] = ipython_tokens + tokens['root'] | |
117 |
|
117 | |||
118 | attrs = {'name': name, 'aliases': aliases, 'filenames': [], |
|
118 | attrs = {'name': name, 'aliases': aliases, 'filenames': [], | |
119 | '__doc__': doc, 'tokens': tokens} |
|
119 | '__doc__': doc, 'tokens': tokens} | |
120 |
|
120 | |||
121 | return type(name, (PyLexer,), attrs) |
|
121 | return type(name, (PyLexer,), attrs) | |
122 |
|
122 | |||
123 |
|
123 | |||
124 | IPython3Lexer = build_ipy_lexer(python3=True) |
|
124 | IPython3Lexer = build_ipy_lexer(python3=True) | |
125 | IPythonLexer = build_ipy_lexer(python3=False) |
|
125 | IPythonLexer = build_ipy_lexer(python3=False) | |
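Because `build_ipy_lexer` creates each class dynamically with `type`, the two lexers behave like
hand-written Pygments lexers. A rough check of the extra rules (the exact token boundaries shown
in the comment are an assumption based on the regexes above)::

    toks = list(IPython3Lexer().get_tokens("%timeit sum(range(10))\n"))
    # Expect the magic call to come out roughly as
    # (Token.Operator, '%'), (Token.Keyword, 'timeit'), (Token.Text, ' sum(range(10))\n'), ...
    print(toks[:3])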
126 |
|
126 | |||
127 |
|
127 | |||
128 | class IPythonPartialTracebackLexer(RegexLexer): |
|
128 | class IPythonPartialTracebackLexer(RegexLexer): | |
129 | """ |
|
129 | """ | |
130 | Partial lexer for IPython tracebacks. |
|
130 | Partial lexer for IPython tracebacks. | |
131 |
|
131 | |||
132 | Handles all the non-python output. |
|
132 | Handles all the non-python output. | |
133 |
|
133 | |||
134 | """ |
|
134 | """ | |
135 | name = 'IPython Partial Traceback' |
|
135 | name = 'IPython Partial Traceback' | |
136 |
|
136 | |||
137 | tokens = { |
|
137 | tokens = { | |
138 | 'root': [ |
|
138 | 'root': [ | |
139 | # Tracebacks for syntax errors have a different style. |
|
139 | # Tracebacks for syntax errors have a different style. | |
140 | # For both types of tracebacks, we mark the first line with |
|
140 | # For both types of tracebacks, we mark the first line with | |
141 | # Generic.Traceback. For syntax errors, we mark the filename |
|
141 | # Generic.Traceback. For syntax errors, we mark the filename | |
142 | # as we mark the filenames for non-syntax tracebacks. |
|
142 | # as we mark the filenames for non-syntax tracebacks. | |
143 | # |
|
143 | # | |
144 | # These two regexps define how IPythonConsoleLexer finds a |
|
144 | # These two regexps define how IPythonConsoleLexer finds a | |
145 | # traceback. |
|
145 | # traceback. | |
146 | # |
|
146 | # | |
147 | ## Non-syntax traceback |
|
147 | ## Non-syntax traceback | |
148 | (r'^(\^C)?(-+\n)', bygroups(Error, Generic.Traceback)), |
|
148 | (r'^(\^C)?(-+\n)', bygroups(Error, Generic.Traceback)), | |
149 | ## Syntax traceback |
|
149 | ## Syntax traceback | |
150 | (r'^( File)(.*)(, line )(\d+\n)', |
|
150 | (r'^( File)(.*)(, line )(\d+\n)', | |
151 | bygroups(Generic.Traceback, Name.Namespace, |
|
151 | bygroups(Generic.Traceback, Name.Namespace, | |
152 | Generic.Traceback, Literal.Number.Integer)), |
|
152 | Generic.Traceback, Literal.Number.Integer)), | |
153 |
|
153 | |||
154 | # (Exception Identifier)(Whitespace)(Traceback Message) |
|
154 | # (Exception Identifier)(Whitespace)(Traceback Message) | |
155 | (r'(?u)(^[^\d\W]\w*)(\s*)(Traceback.*?\n)', |
|
155 | (r'(?u)(^[^\d\W]\w*)(\s*)(Traceback.*?\n)', | |
156 | bygroups(Name.Exception, Generic.Whitespace, Text)), |
|
156 | bygroups(Name.Exception, Generic.Whitespace, Text)), | |
157 | # (Module/Filename)(Text)(Callee)(Function Signature) |
|
157 | # (Module/Filename)(Text)(Callee)(Function Signature) | |
158 | # Better options for callee and function signature? |
|
158 | # Better options for callee and function signature? | |
159 | (r'(.*)( in )(.*)(\(.*\)\n)', |
|
159 | (r'(.*)( in )(.*)(\(.*\)\n)', | |
160 | bygroups(Name.Namespace, Text, Name.Entity, Name.Tag)), |
|
160 | bygroups(Name.Namespace, Text, Name.Entity, Name.Tag)), | |
161 | # Regular line: (Whitespace)(Line Number)(Python Code) |
|
161 | # Regular line: (Whitespace)(Line Number)(Python Code) | |
162 | (r'(\s*?)(\d+)(.*?\n)', |
|
162 | (r'(\s*?)(\d+)(.*?\n)', | |
163 | bygroups(Generic.Whitespace, Literal.Number.Integer, Other)), |
|
163 | bygroups(Generic.Whitespace, Literal.Number.Integer, Other)), | |
164 | # Emphasized line: (Arrow)(Line Number)(Python Code) |
|
164 | # Emphasized line: (Arrow)(Line Number)(Python Code) | |
165 | # Using Exception token so arrow color matches the Exception. |
|
165 | # Using Exception token so arrow color matches the Exception. | |
166 | (r'(-*>?\s?)(\d+)(.*?\n)', |
|
166 | (r'(-*>?\s?)(\d+)(.*?\n)', | |
167 | bygroups(Name.Exception, Literal.Number.Integer, Other)), |
|
167 | bygroups(Name.Exception, Literal.Number.Integer, Other)), | |
168 | # (Exception Identifier)(Message) |
|
168 | # (Exception Identifier)(Message) | |
169 | (r'(?u)(^[^\d\W]\w*)(:.*?\n)', |
|
169 | (r'(?u)(^[^\d\W]\w*)(:.*?\n)', | |
170 | bygroups(Name.Exception, Text)), |
|
170 | bygroups(Name.Exception, Text)), | |
171 | # Tag everything else as Other, will be handled later. |
|
171 | # Tag everything else as Other, will be handled later. | |
172 | (r'.*\n', Other), |
|
172 | (r'.*\n', Other), | |
173 | ], |
|
173 | ], | |
174 | } |
|
174 | } | |
175 |
|
175 | |||
176 |
|
176 | |||
177 | class IPythonTracebackLexer(DelegatingLexer): |
|
177 | class IPythonTracebackLexer(DelegatingLexer): | |
178 | """ |
|
178 | """ | |
179 | IPython traceback lexer. |
|
179 | IPython traceback lexer. | |
180 |
|
180 | |||
181 | For doctests, the tracebacks can be snipped as much as desired with the |
|
181 | For doctests, the tracebacks can be snipped as much as desired with the | |
182 | exception of the lines that designate a traceback. For non-syntax error |
|
182 | exception of the lines that designate a traceback. For non-syntax error | |
183 | tracebacks, this is the line of hyphens. For syntax error tracebacks, |
|
183 | tracebacks, this is the line of hyphens. For syntax error tracebacks, | |
184 | this is the line which lists the File and line number. |
|
184 | this is the line which lists the File and line number. | |
185 |
|
185 | |||
186 | """ |
|
186 | """ | |
187 | # The lexer inherits from DelegatingLexer. The "root" lexer is an |
|
187 | # The lexer inherits from DelegatingLexer. The "root" lexer is an | |
188 | # appropriate IPython lexer, which depends on the value of the boolean |
|
188 | # appropriate IPython lexer, which depends on the value of the boolean | |
189 | # `python3`. First, we parse with the partial IPython traceback lexer. |
|
189 | # `python3`. First, we parse with the partial IPython traceback lexer. | |
190 | # Then, any code marked with the "Other" token is delegated to the root |
|
190 | # Then, any code marked with the "Other" token is delegated to the root | |
191 | # lexer. |
|
191 | # lexer. | |
192 | # |
|
192 | # | |
193 | name = 'IPython Traceback' |
|
193 | name = 'IPython Traceback' | |
194 | aliases = ['ipythontb'] |
|
194 | aliases = ['ipythontb'] | |
195 |
|
195 | |||
196 | def __init__(self, **options): |
|
196 | def __init__(self, **options): | |
197 | self.python3 = get_bool_opt(options, 'python3', False) |
|
197 | self.python3 = get_bool_opt(options, 'python3', False) | |
198 | if self.python3: |
|
198 | if self.python3: | |
199 | self.aliases = ['ipython3tb'] |
|
199 | self.aliases = ['ipython3tb'] | |
200 | else: |
|
200 | else: | |
201 | self.aliases = ['ipython2tb', 'ipythontb'] |
|
201 | self.aliases = ['ipython2tb', 'ipythontb'] | |
202 |
|
202 | |||
203 | if self.python3: |
|
203 | if self.python3: | |
204 | IPyLexer = IPython3Lexer |
|
204 | IPyLexer = IPython3Lexer | |
205 | else: |
|
205 | else: | |
206 | IPyLexer = IPythonLexer |
|
206 | IPyLexer = IPythonLexer | |
207 |
|
207 | |||
208 | DelegatingLexer.__init__(self, IPyLexer, |
|
208 | DelegatingLexer.__init__(self, IPyLexer, | |
209 | IPythonPartialTracebackLexer, **options) |
|
209 | IPythonPartialTracebackLexer, **options) | |
210 |
|
210 | |||
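A short sketch of the delegation in practice (the traceback text is illustrative only): the
partial lexer claims the ruler, the frame headers and the exception lines, and everything it
tags as `Other` is re-lexed by the IPython lexer::

    tb = (
        "---------------------------------------------------------------------------\n"
        "ZeroDivisionError                         Traceback (most recent call last)\n"
        "<ipython-input-1-bc757c3fda29> in <module>\n"
        "----> 1 1/0\n"
        "\n"
        "ZeroDivisionError: division by zero\n"
    )
    for token, value in IPythonTracebackLexer(python3=True).get_tokens(tb):
        print(token, repr(value))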
211 | class IPythonConsoleLexer(Lexer): |
|
211 | class IPythonConsoleLexer(Lexer): | |
212 | """ |
|
212 | """ | |
213 | An IPython console lexer for IPython code-blocks and doctests, such as: |
|
213 | An IPython console lexer for IPython code-blocks and doctests, such as: | |
214 |
|
214 | |||
215 | .. code-block:: rst |
|
215 | .. code-block:: rst | |
216 |
|
216 | |||
217 | .. code-block:: ipythonconsole |
|
217 | .. code-block:: ipythonconsole | |
218 |
|
218 | |||
219 | In [1]: a = 'foo' |
|
219 | In [1]: a = 'foo' | |
220 |
|
220 | |||
221 | In [2]: a |
|
221 | In [2]: a | |
222 | Out[2]: 'foo' |
|
222 | Out[2]: 'foo' | |
223 |
|
223 | |||
224 | In [3]: print a |
|
224 | In [3]: print a | |
225 | foo |
|
225 | foo | |
226 |
|
226 | |||
227 | In [4]: 1 / 0 |
|
227 | In [4]: 1 / 0 | |
228 |
|
228 | |||
229 |
|
229 | |||
230 | Support is also provided for IPython exceptions: |
|
230 | Support is also provided for IPython exceptions: | |
231 |
|
231 | |||
232 | .. code-block:: rst |
|
232 | .. code-block:: rst | |
233 |
|
233 | |||
234 | .. code-block:: ipythonconsole |
|
234 | .. code-block:: ipythonconsole | |
235 |
|
235 | |||
236 | In [1]: raise Exception |
|
236 | In [1]: raise Exception | |
237 |
|
237 | |||
238 | --------------------------------------------------------------------------- |
|
238 | --------------------------------------------------------------------------- | |
239 | Exception Traceback (most recent call last) |
|
239 | Exception Traceback (most recent call last) | |
240 | <ipython-input-1-fca2ab0ca76b> in <module> |
|
240 | <ipython-input-1-fca2ab0ca76b> in <module> | |
241 | ----> 1 raise Exception |
|
241 | ----> 1 raise Exception | |
242 |
|
242 | |||
243 | Exception: |
|
243 | Exception: | |
244 |
|
244 | |||
245 | """ |
|
245 | """ | |
246 | name = 'IPython console session' |
|
246 | name = 'IPython console session' | |
247 | aliases = ['ipythonconsole'] |
|
247 | aliases = ['ipythonconsole'] | |
248 | mimetypes = ['text/x-ipython-console'] |
|
248 | mimetypes = ['text/x-ipython-console'] | |
249 |
|
249 | |||
250 | # The regexps used to determine what is input and what is output. |
|
250 | # The regexps used to determine what is input and what is output. | |
251 | # The default prompts for IPython are: |
|
251 | # The default prompts for IPython are: | |
252 | # |
|
252 | # | |
253 | # in = 'In [#]: ' |
|
253 | # in = 'In [#]: ' | |
254 | # continuation = ' .D.: ' |
|
254 | # continuation = ' .D.: ' | |
255 | # template = 'Out[#]: ' |
|
255 | # template = 'Out[#]: ' | |
256 | # |
|
256 | # | |
257 | # Where '#' is the 'prompt number' or 'execution count', and 'D' |
|
257 | # Where '#' is the 'prompt number' or 'execution count', and 'D' | |
258 | # is a number of dots matching the width of the execution count. |
|
258 | # is a number of dots matching the width of the execution count. | |
259 | # |
|
259 | # | |
260 | in1_regex = r'In \[[0-9]+\]: ' |
|
260 | in1_regex = r'In \[[0-9]+\]: ' | |
261 | in2_regex = r' \.\.+\.: ' |
|
261 | in2_regex = r' \.\.+\.: ' | |
262 | out_regex = r'Out\[[0-9]+\]: ' |
|
262 | out_regex = r'Out\[[0-9]+\]: ' | |
263 |
|
263 | |||
264 | #: The regex to determine when a traceback starts. |
|
264 | #: The regex to determine when a traceback starts. | |
265 | ipytb_start = re.compile(r'^(\^C)?(-+\n)|^( File)(.*)(, line )(\d+\n)') |
|
265 | ipytb_start = re.compile(r'^(\^C)?(-+\n)|^( File)(.*)(, line )(\d+\n)') | |
266 |
|
266 | |||
267 | def __init__(self, **options): |
|
267 | def __init__(self, **options): | |
268 | """Initialize the IPython console lexer. |
|
268 | """Initialize the IPython console lexer. | |
269 |
|
269 | |||
270 | Parameters |
|
270 | Parameters | |
271 | ---------- |
|
271 | ---------- | |
272 | python3 : bool |
|
272 | python3 : bool | |
273 | If `True`, then the console inputs are parsed using a Python 3 |
|
273 | If `True`, then the console inputs are parsed using a Python 3 | |
274 | lexer. Otherwise, they are parsed using a Python 2 lexer. |
|
274 | lexer. Otherwise, they are parsed using a Python 2 lexer. | |
275 | in1_regex : RegexObject |
|
275 | in1_regex : RegexObject | |
276 | The compiled regular expression used to detect the start |
|
276 | The compiled regular expression used to detect the start | |
277 | of inputs. Although the IPython configuration setting may have a |
|
277 | of inputs. Although the IPython configuration setting may have a | |
278 | trailing whitespace, do not include it in the regex. If `None`, |
|
278 | trailing whitespace, do not include it in the regex. If `None`, | |
279 | then the default input prompt is assumed. |
|
279 | then the default input prompt is assumed. | |
280 | in2_regex : RegexObject |
|
280 | in2_regex : RegexObject | |
281 | The compiled regular expression used to detect the continuation |
|
281 | The compiled regular expression used to detect the continuation | |
282 | of inputs. Although the IPython configuration setting may have a |
|
282 | of inputs. Although the IPython configuration setting may have a | |
283 | trailing whitespace, do not include it in the regex. If `None`, |
|
283 | trailing whitespace, do not include it in the regex. If `None`, | |
284 | then the default input prompt is assumed. |
|
284 | then the default input prompt is assumed. | |
285 | out_regex : RegexObject |
|
285 | out_regex : RegexObject | |
286 | The compiled regular expression used to detect outputs. If `None`, |
|
286 | The compiled regular expression used to detect outputs. If `None`, | |
287 | then the default output prompt is assumed. |
|
287 | then the default output prompt is assumed. | |
288 |
|
288 | |||
289 | """ |
|
289 | """ | |
290 | self.python3 = get_bool_opt(options, 'python3', False) |
|
290 | self.python3 = get_bool_opt(options, 'python3', False) | |
291 | if self.python3: |
|
291 | if self.python3: | |
292 | self.aliases = ['ipython3console'] |
|
292 | self.aliases = ['ipython3console'] | |
293 | else: |
|
293 | else: | |
294 | self.aliases = ['ipython2console', 'ipythonconsole'] |
|
294 | self.aliases = ['ipython2console', 'ipythonconsole'] | |
295 |
|
295 | |||
296 | in1_regex = options.get('in1_regex', self.in1_regex) |
|
296 | in1_regex = options.get('in1_regex', self.in1_regex) | |
297 | in2_regex = options.get('in2_regex', self.in2_regex) |
|
297 | in2_regex = options.get('in2_regex', self.in2_regex) | |
298 | out_regex = options.get('out_regex', self.out_regex) |
|
298 | out_regex = options.get('out_regex', self.out_regex) | |
299 |
|
299 | |||
300 | # So that we can work with input and output prompts which have been |
|
300 | # So that we can work with input and output prompts which have been | |
301 | # rstrip'd (possibly by editors) we also need rstrip'd variants. If |
|
301 | # rstrip'd (possibly by editors) we also need rstrip'd variants. If | |
302 | # we do not do this, then such prompts will be tagged as 'output'. |
|
302 | # we do not do this, then such prompts will be tagged as 'output'. | |
303 | # The reason we can't just use the rstrip'd variants instead is because |
|
303 | # The reason we can't just use the rstrip'd variants instead is because | |
304 | # we want any whitespace associated with the prompt to be inserted |
|
304 | # we want any whitespace associated with the prompt to be inserted | |
305 | # with the token. This allows formatted code to be modified so as to hide |
|
305 | # with the token. This allows formatted code to be modified so as to hide | |
306 | # the appearance of prompts, with the whitespace included. One example |
|
306 | # the appearance of prompts, with the whitespace included. One example | |
307 | # use of this is in copybutton.js from the standard lib Python docs. |
|
307 | # use of this is in copybutton.js from the standard lib Python docs. | |
308 | in1_regex_rstrip = in1_regex.rstrip() + '\n' |
|
308 | in1_regex_rstrip = in1_regex.rstrip() + '\n' | |
309 | in2_regex_rstrip = in2_regex.rstrip() + '\n' |
|
309 | in2_regex_rstrip = in2_regex.rstrip() + '\n' | |
310 | out_regex_rstrip = out_regex.rstrip() + '\n' |
|
310 | out_regex_rstrip = out_regex.rstrip() + '\n' | |
311 |
|
311 | |||
312 | # Compile and save them all. |
|
312 | # Compile and save them all. | |
313 | attrs = ['in1_regex', 'in2_regex', 'out_regex', |
|
313 | attrs = ['in1_regex', 'in2_regex', 'out_regex', | |
314 | 'in1_regex_rstrip', 'in2_regex_rstrip', 'out_regex_rstrip'] |
|
314 | 'in1_regex_rstrip', 'in2_regex_rstrip', 'out_regex_rstrip'] | |
315 | for attr in attrs: |
|
315 | for attr in attrs: | |
316 | self.__setattr__(attr, re.compile(locals()[attr])) |
|
316 | self.__setattr__(attr, re.compile(locals()[attr])) | |
317 |
|
317 | |||
318 | Lexer.__init__(self, **options) |
|
318 | Lexer.__init__(self, **options) | |
319 |
|
319 | |||
320 | if self.python3: |
|
320 | if self.python3: | |
321 | pylexer = IPython3Lexer |
|
321 | pylexer = IPython3Lexer | |
322 | tblexer = IPythonTracebackLexer |
|
322 | tblexer = IPythonTracebackLexer | |
323 | else: |
|
323 | else: | |
324 | pylexer = IPythonLexer |
|
324 | pylexer = IPythonLexer | |
325 | tblexer = IPythonTracebackLexer |
|
325 | tblexer = IPythonTracebackLexer | |
326 |
|
326 | |||
327 | self.pylexer = pylexer(**options) |
|
327 | self.pylexer = pylexer(**options) | |
328 | self.tblexer = tblexer(**options) |
|
328 | self.tblexer = tblexer(**options) | |
329 |
|
329 | |||
330 | self.reset() |
|
330 | self.reset() | |
331 |
|
331 | |||
332 | def reset(self): |
|
332 | def reset(self): | |
333 | self.mode = 'output' |
|
333 | self.mode = 'output' | |
334 | self.index = 0 |
|
334 | self.index = 0 | |
335 | self.buffer = u'' |
|
335 | self.buffer = u'' | |
336 | self.insertions = [] |
|
336 | self.insertions = [] | |
337 |
|
337 | |||
338 | def buffered_tokens(self): |
|
338 | def buffered_tokens(self): | |
339 | """ |
|
339 | """ | |
340 | Generator of unprocessed tokens after doing insertions and before |
|
340 | Generator of unprocessed tokens after doing insertions and before | |
341 | changing to a new state. |
|
341 | changing to a new state. | |
342 |
|
342 | |||
343 | """ |
|
343 | """ | |
344 | if self.mode == 'output': |
|
344 | if self.mode == 'output': | |
345 | tokens = [(0, Generic.Output, self.buffer)] |
|
345 | tokens = [(0, Generic.Output, self.buffer)] | |
346 | elif self.mode == 'input': |
|
346 | elif self.mode == 'input': | |
347 | tokens = self.pylexer.get_tokens_unprocessed(self.buffer) |
|
347 | tokens = self.pylexer.get_tokens_unprocessed(self.buffer) | |
348 | else: # traceback |
|
348 | else: # traceback | |
349 | tokens = self.tblexer.get_tokens_unprocessed(self.buffer) |
|
349 | tokens = self.tblexer.get_tokens_unprocessed(self.buffer) | |
350 |
|
350 | |||
351 | for i, t, v in do_insertions(self.insertions, tokens): |
|
351 | for i, t, v in do_insertions(self.insertions, tokens): | |
352 | # All token indexes are relative to the buffer. |
|
352 | # All token indexes are relative to the buffer. | |
353 | yield self.index + i, t, v |
|
353 | yield self.index + i, t, v | |
354 |
|
354 | |||
355 | # Clear it all |
|
355 | # Clear it all | |
356 | self.index += len(self.buffer) |
|
356 | self.index += len(self.buffer) | |
357 | self.buffer = u'' |
|
357 | self.buffer = u'' | |
358 | self.insertions = [] |
|
358 | self.insertions = [] | |
359 |
|
359 | |||
360 | def get_mci(self, line): |
|
360 | def get_mci(self, line): | |
361 | """ |
|
361 | """ | |
362 | Parses the line and returns a 3-tuple: (mode, code, insertion). |
|
362 | Parses the line and returns a 3-tuple: (mode, code, insertion). | |
363 |
|
363 | |||
364 | `mode` is the next mode (or state) of the lexer, and is always equal |
|
364 | `mode` is the next mode (or state) of the lexer, and is always equal | |
365 | to 'input', 'output', or 'tb'. |
|
365 | to 'input', 'output', or 'tb'. | |
366 |
|
366 | |||
367 | `code` is a portion of the line that should be added to the buffer |
|
367 | `code` is a portion of the line that should be added to the buffer | |
368 | corresponding to the next mode and eventually lexed by another lexer. |
|
368 | corresponding to the next mode and eventually lexed by another lexer. | |
369 | For example, `code` could be Python code if `mode` were 'input'. |
|
369 | For example, `code` could be Python code if `mode` were 'input'. | |
370 |
|
370 | |||
371 | `insertion` is a 3-tuple (index, token, text) representing an |
|
371 | `insertion` is a 3-tuple (index, token, text) representing an | |
372 | unprocessed "token" that will be inserted into the stream of tokens |
|
372 | unprocessed "token" that will be inserted into the stream of tokens | |
373 | that are created from the buffer once we change modes. This is usually |
|
373 | that are created from the buffer once we change modes. This is usually | |
374 | the input or output prompt. |
|
374 | the input or output prompt. | |
375 |
|
375 | |||
376 | In general, the next mode depends on current mode and on the contents |
|
376 | In general, the next mode depends on current mode and on the contents | |
377 | of `line`. |
|
377 | of `line`. | |
378 |
|
378 | |||
379 | """ |
|
379 | """ | |
380 | # To reduce the number of regex match checks, we have multiple |
|
380 | # To reduce the number of regex match checks, we have multiple | |
381 | # 'if' blocks instead of 'if-elif' blocks. |
|
381 | # 'if' blocks instead of 'if-elif' blocks. | |
382 |
|
382 | |||
383 | # Check for possible end of input |
|
383 | # Check for possible end of input | |
384 | in2_match = self.in2_regex.match(line) |
|
384 | in2_match = self.in2_regex.match(line) | |
385 | in2_match_rstrip = self.in2_regex_rstrip.match(line) |
|
385 | in2_match_rstrip = self.in2_regex_rstrip.match(line) | |
386 | if (in2_match and in2_match.group().rstrip() == line.rstrip()) or \ |
|
386 | if (in2_match and in2_match.group().rstrip() == line.rstrip()) or \ | |
387 | in2_match_rstrip: |
|
387 | in2_match_rstrip: | |
388 | end_input = True |
|
388 | end_input = True | |
389 | else: |
|
389 | else: | |
390 | end_input = False |
|
390 | end_input = False | |
391 | if end_input and self.mode != 'tb': |
|
391 | if end_input and self.mode != 'tb': | |
392 | # Only look for an end of input when not in tb mode. |
|
392 | # Only look for an end of input when not in tb mode. | |
393 | # An ellipsis could appear within the traceback. |
|
393 | # An ellipsis could appear within the traceback. | |
394 | mode = 'output' |
|
394 | mode = 'output' | |
395 | code = u'' |
|
395 | code = u'' | |
396 | insertion = (0, Generic.Prompt, line) |
|
396 | insertion = (0, Generic.Prompt, line) | |
397 | return mode, code, insertion |
|
397 | return mode, code, insertion | |
398 |
|
398 | |||
399 | # Check for output prompt |
|
399 | # Check for output prompt | |
400 | out_match = self.out_regex.match(line) |
|
400 | out_match = self.out_regex.match(line) | |
401 | out_match_rstrip = self.out_regex_rstrip.match(line) |
|
401 | out_match_rstrip = self.out_regex_rstrip.match(line) | |
402 | if out_match or out_match_rstrip: |
|
402 | if out_match or out_match_rstrip: | |
403 | mode = 'output' |
|
403 | mode = 'output' | |
404 | if out_match: |
|
404 | if out_match: | |
405 | idx = out_match.end() |
|
405 | idx = out_match.end() | |
406 | else: |
|
406 | else: | |
407 | idx = out_match_rstrip.end() |
|
407 | idx = out_match_rstrip.end() | |
408 | code = line[idx:] |
|
408 | code = line[idx:] | |
409 | # Use the 'heading' token for output. We cannot use Generic.Error |
|
409 | # Use the 'heading' token for output. We cannot use Generic.Error | |
410 | # since it would conflict with exceptions. |
|
410 | # since it would conflict with exceptions. | |
411 | insertion = (0, Generic.Heading, line[:idx]) |
|
411 | insertion = (0, Generic.Heading, line[:idx]) | |
412 | return mode, code, insertion |
|
412 | return mode, code, insertion | |
413 |
|
413 | |||
414 |
|
414 | |||
415 | # Check for input or continuation prompt (non stripped version) |
|
415 | # Check for input or continuation prompt (non stripped version) | |
416 | in1_match = self.in1_regex.match(line) |
|
416 | in1_match = self.in1_regex.match(line) | |
417 | if in1_match or (in2_match and self.mode != 'tb'): |
|
417 | if in1_match or (in2_match and self.mode != 'tb'): | |
418 | # New input or when not in tb, continued input. |
|
418 | # New input or when not in tb, continued input. | |
419 | # We do not check for continued input when in tb since it is |
|
419 | # We do not check for continued input when in tb since it is | |
420 | # allowable to replace a long stack with an ellipsis. |
|
420 | # allowable to replace a long stack with an ellipsis. | |
421 | mode = 'input' |
|
421 | mode = 'input' | |
422 | if in1_match: |
|
422 | if in1_match: | |
423 | idx = in1_match.end() |
|
423 | idx = in1_match.end() | |
424 | else: # in2_match |
|
424 | else: # in2_match | |
425 | idx = in2_match.end() |
|
425 | idx = in2_match.end() | |
426 | code = line[idx:] |
|
426 | code = line[idx:] | |
427 | insertion = (0, Generic.Prompt, line[:idx]) |
|
427 | insertion = (0, Generic.Prompt, line[:idx]) | |
428 | return mode, code, insertion |
|
428 | return mode, code, insertion | |
429 |
|
429 | |||
430 | # Check for input or continuation prompt (stripped version) |
|
430 | # Check for input or continuation prompt (stripped version) | |
431 | in1_match_rstrip = self.in1_regex_rstrip.match(line) |
|
431 | in1_match_rstrip = self.in1_regex_rstrip.match(line) | |
432 | if in1_match_rstrip or (in2_match_rstrip and self.mode != 'tb'): |
|
432 | if in1_match_rstrip or (in2_match_rstrip and self.mode != 'tb'): | |
433 | # New input or when not in tb, continued input. |
|
433 | # New input or when not in tb, continued input. | |
434 | # We do not check for continued input when in tb since it is |
|
434 | # We do not check for continued input when in tb since it is | |
435 | # allowable to replace a long stack with an ellipsis. |
|
435 | # allowable to replace a long stack with an ellipsis. | |
436 | mode = 'input' |
|
436 | mode = 'input' | |
437 | if in1_match_rstrip: |
|
437 | if in1_match_rstrip: | |
438 | idx = in1_match_rstrip.end() |
|
438 | idx = in1_match_rstrip.end() | |
439 | else: # in2_match |
|
439 | else: # in2_match | |
440 | idx = in2_match_rstrip.end() |
|
440 | idx = in2_match_rstrip.end() | |
441 | code = line[idx:] |
|
441 | code = line[idx:] | |
442 | insertion = (0, Generic.Prompt, line[:idx]) |
|
442 | insertion = (0, Generic.Prompt, line[:idx]) | |
443 | return mode, code, insertion |
|
443 | return mode, code, insertion | |
444 |
|
444 | |||
445 | # Check for traceback |
|
445 | # Check for traceback | |
446 | if self.ipytb_start.match(line): |
|
446 | if self.ipytb_start.match(line): | |
447 | mode = 'tb' |
|
447 | mode = 'tb' | |
448 | code = line |
|
448 | code = line | |
449 | insertion = None |
|
449 | insertion = None | |
450 | return mode, code, insertion |
|
450 | return mode, code, insertion | |
451 |
|
451 | |||
452 | # All other stuff... |
|
452 | # All other stuff... | |
453 | if self.mode in ('input', 'output'): |
|
453 | if self.mode in ('input', 'output'): | |
454 | # We assume all other text is output. Multiline input that |
|
454 | # We assume all other text is output. Multiline input that | |
455 | # does not use the continuation marker cannot be detected. |
|
455 | # does not use the continuation marker cannot be detected. | |
456 | # For example, the 3 in the following is clearly output: |
|
456 | # For example, the 3 in the following is clearly output: | |
457 | # |
|
457 | # | |
458 | # In [1]: print 3 |
|
458 | # In [1]: print 3 | |
459 | # 3 |
|
459 | # 3 | |
460 | # |
|
460 | # | |
461 | # But the following second line is part of the input: |
|
461 | # But the following second line is part of the input: | |
462 | # |
|
462 | # | |
463 | # In [2]: while True: |
|
463 | # In [2]: while True: | |
464 | # print True |
|
464 | # print True | |
465 | # |
|
465 | # | |
466 | # In both cases, the 2nd line will be 'output'. |
|
466 | # In both cases, the 2nd line will be 'output'. | |
467 | # |
|
467 | # | |
468 | mode = 'output' |
|
468 | mode = 'output' | |
469 | else: |
|
469 | else: | |
470 | mode = 'tb' |
|
470 | mode = 'tb' | |
471 |
|
471 | |||
472 | code = line |
|
472 | code = line | |
473 | insertion = None |
|
473 | insertion = None | |
474 |
|
474 | |||
475 | return mode, code, insertion |
|
475 | return mode, code, insertion | |
476 |
|
476 | |||
477 | def get_tokens_unprocessed(self, text): |
|
477 | def get_tokens_unprocessed(self, text): | |
478 | self.reset() |
|
478 | self.reset() | |
479 | for match in line_re.finditer(text): |
|
479 | for match in line_re.finditer(text): | |
480 | line = match.group() |
|
480 | line = match.group() | |
481 | mode, code, insertion = self.get_mci(line) |
|
481 | mode, code, insertion = self.get_mci(line) | |
482 |
|
482 | |||
483 | if mode != self.mode: |
|
483 | if mode != self.mode: | |
484 | # Yield buffered tokens before transitioning to new mode. |
|
484 | # Yield buffered tokens before transitioning to new mode. | |
485 | for token in self.buffered_tokens(): |
|
485 | for token in self.buffered_tokens(): | |
486 | yield token |
|
486 | yield token | |
487 | self.mode = mode |
|
487 | self.mode = mode | |
488 |
|
488 | |||
489 | if insertion: |
|
489 | if insertion: | |
490 | self.insertions.append((len(self.buffer), [insertion])) |
|
490 | self.insertions.append((len(self.buffer), [insertion])) | |
491 | self.buffer += code |
|
491 | self.buffer += code | |
492 |
|
492 | |||
493 | for token in self.buffered_tokens(): |
|
493 | for token in self.buffered_tokens(): | |
494 | yield token |
|
494 | yield token | |
495 |
|
495 | |||
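A usage sketch for the console lexer, including the prompt-regex options described in
`__init__` (the patterns passed here mirror the defaults and are shown only for illustration;
omit them to use the defaults)::

    console = IPythonConsoleLexer(
        python3=True,
        in1_regex=r'In \[[0-9]+\]: ',    # start-of-input prompt
        out_regex=r'Out\[[0-9]+\]: ',    # output prompt
    )
    session = "In [1]: 1 + 1\nOut[1]: 2\n"
    for token, value in console.get_tokens(session):
        print(token, repr(value))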
496 | class IPyLexer(Lexer): |
|
496 | class IPyLexer(Lexer): | |
497 | r""" |
|
497 | r""" | |
498 | Primary lexer for all IPython-like code. |
|
498 | Primary lexer for all IPython-like code. | |
499 |
|
499 | |||
500 | This is a simple helper lexer. If the first line of the text begins with |
|
500 | This is a simple helper lexer. If the first line of the text begins with | |
501 | "In \[[0-9]+\]:", then the entire text is parsed with an IPython console |
|
501 | "In \[[0-9]+\]:", then the entire text is parsed with an IPython console | |
502 | lexer. If not, then the entire text is parsed with an IPython lexer. |
|
502 | lexer. If not, then the entire text is parsed with an IPython lexer. | |
503 |
|
503 | |||
504 | The goal is to reduce the number of lexers that are registered |
|
504 | The goal is to reduce the number of lexers that are registered | |
505 | with Pygments. |
|
505 | with Pygments. | |
506 |
|
506 | |||
507 | """ |
|
507 | """ | |
508 | name = 'IPy session' |
|
508 | name = 'IPy session' | |
509 | aliases = ['ipy'] |
|
509 | aliases = ['ipy'] | |
510 |
|
510 | |||
511 | def __init__(self, **options): |
|
511 | def __init__(self, **options): | |
512 | self.python3 = get_bool_opt(options, 'python3', False) |
|
512 | self.python3 = get_bool_opt(options, 'python3', False) | |
513 | if self.python3: |
|
513 | if self.python3: | |
514 | self.aliases = ['ipy3'] |
|
514 | self.aliases = ['ipy3'] | |
515 | else: |
|
515 | else: | |
516 | self.aliases = ['ipy2', 'ipy'] |
|
516 | self.aliases = ['ipy2', 'ipy'] | |
517 |
|
517 | |||
518 | Lexer.__init__(self, **options) |
|
518 | Lexer.__init__(self, **options) | |
519 |
|
519 | |||
520 | self.IPythonLexer = IPythonLexer(**options) |
|
520 | self.IPythonLexer = IPythonLexer(**options) | |
521 | self.IPythonConsoleLexer = IPythonConsoleLexer(**options) |
|
521 | self.IPythonConsoleLexer = IPythonConsoleLexer(**options) | |
522 |
|
522 | |||
523 | def get_tokens_unprocessed(self, text): |
|
523 | def get_tokens_unprocessed(self, text): | |
524 | # Search for the input prompt anywhere...this allows code blocks to |
|
524 | # Search for the input prompt anywhere...this allows code blocks to | |
525 | # begin with comments as well. |
|
525 | # begin with comments as well. | |
526 | if re.match(r'.*(In \[[0-9]+\]:)', text.strip(), re.DOTALL): |
|
526 | if re.match(r'.*(In \[[0-9]+\]:)', text.strip(), re.DOTALL): | |
527 | lex = self.IPythonConsoleLexer |
|
527 | lex = self.IPythonConsoleLexer | |
528 | else: |
|
528 | else: | |
529 | lex = self.IPythonLexer |
|
529 | lex = self.IPythonLexer | |
530 | for token in lex.get_tokens_unprocessed(text): |
|
530 | for token in lex.get_tokens_unprocessed(text): | |
531 | yield token |
|
531 | yield token | |
532 |
|
532 |
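The dispatch performed by `IPyLexer` can be seen directly; the check is the same
`In \[[0-9]+\]:` regex used in `get_tokens_unprocessed` above (illustrative input)::

    plain   = "%matplotlib inline\nx = 1\n"
    session = "In [1]: x = 1\n\nIn [2]: x\nOut[2]: 1\n"

    lexer = IPyLexer(python3=True)
    # `plain` has no "In [N]:" prompt anywhere, so the plain IPython lexer runs;
    # `session` contains one, so the console lexer runs instead.
    for text in (plain, session):
        print(list(lexer.get_tokens(text))[:4])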
@@ -1,871 +1,856 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | """ |
|
2 | """ | |
3 | Python advanced pretty printer. This pretty printer is intended to |
|
3 | Python advanced pretty printer. This pretty printer is intended to | |
4 | replace the old `pprint` python module which does not allow developers |
|
4 | replace the old `pprint` python module which does not allow developers | |
5 | to provide their own pretty print callbacks. |
|
5 | to provide their own pretty print callbacks. | |
6 |
|
6 | |||
7 | This module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`. |
|
7 | This module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`. | |
8 |
|
8 | |||
9 |
|
9 | |||
10 | Example Usage |
|
10 | Example Usage | |
11 | ------------- |
|
11 | ------------- | |
12 |
|
12 | |||
13 | To directly print the representation of an object use `pprint`:: |
|
13 | To directly print the representation of an object use `pprint`:: | |
14 |
|
14 | |||
15 | from pretty import pprint |
|
15 | from pretty import pprint | |
16 | pprint(complex_object) |
|
16 | pprint(complex_object) | |
17 |
|
17 | |||
18 | To get a string of the output use `pretty`:: |
|
18 | To get a string of the output use `pretty`:: | |
19 |
|
19 | |||
20 | from pretty import pretty |
|
20 | from pretty import pretty | |
21 | string = pretty(complex_object) |
|
21 | string = pretty(complex_object) | |
22 |
|
22 | |||
23 |
|
23 | |||
24 | Extending |
|
24 | Extending | |
25 | --------- |
|
25 | --------- | |
26 |
|
26 | |||
27 | The pretty library allows developers to add pretty printing rules for their |
|
27 | The pretty library allows developers to add pretty printing rules for their | |
28 | own objects. This process is straightforward. All you have to do is to |
|
28 | own objects. This process is straightforward. All you have to do is to | |
29 | add a `_repr_pretty_` method to your object and call the methods on the |
|
29 | add a `_repr_pretty_` method to your object and call the methods on the | |
30 | pretty printer passed:: |
|
30 | pretty printer passed:: | |
31 |
|
31 | |||
32 | class MyObject(object): |
|
32 | class MyObject(object): | |
33 |
|
33 | |||
34 | def _repr_pretty_(self, p, cycle): |
|
34 | def _repr_pretty_(self, p, cycle): | |
35 | ... |
|
35 | ... | |
36 |
|
36 | |||
37 | Here is an example implementation of a `_repr_pretty_` method for a list |
|
37 | Here is an example implementation of a `_repr_pretty_` method for a list | |
38 | subclass:: |
|
38 | subclass:: | |
39 |
|
39 | |||
40 | class MyList(list): |
|
40 | class MyList(list): | |
41 |
|
41 | |||
42 | def _repr_pretty_(self, p, cycle): |
|
42 | def _repr_pretty_(self, p, cycle): | |
43 | if cycle: |
|
43 | if cycle: | |
44 | p.text('MyList(...)') |
|
44 | p.text('MyList(...)') | |
45 | else: |
|
45 | else: | |
46 | with p.group(8, 'MyList([', '])'): |
|
46 | with p.group(8, 'MyList([', '])'): | |
47 | for idx, item in enumerate(self): |
|
47 | for idx, item in enumerate(self): | |
48 | if idx: |
|
48 | if idx: | |
49 | p.text(',') |
|
49 | p.text(',') | |
50 | p.breakable() |
|
50 | p.breakable() | |
51 | p.pretty(item) |
|
51 | p.pretty(item) | |
52 |
|
52 | |||
53 | The `cycle` parameter is `True` if pretty detected a cycle. You *have* to |
|
53 | The `cycle` parameter is `True` if pretty detected a cycle. You *have* to | |
54 | react to that or the result is an infinite loop. `p.text()` just adds |
|
54 | react to that or the result is an infinite loop. `p.text()` just adds | |
55 | non-breaking text to the output, `p.breakable()` either adds a whitespace |
|
55 | non-breaking text to the output, `p.breakable()` either adds a whitespace | |
56 | or breaks here. If you pass it an argument it's used instead of the |
|
56 | or breaks here. If you pass it an argument it's used instead of the | |
57 | default space. `p.pretty` prettyprints another object using the pretty print |
|
57 | default space. `p.pretty` prettyprints another object using the pretty print | |
58 | method. |
|
58 | method. | |
59 |
|
59 | |||
60 | The first parameter to the `group` function specifies the extra indentation |
|
60 | The first parameter to the `group` function specifies the extra indentation | |
61 | of the next line. In this example the next item will either be on the same |
|
61 | of the next line. In this example the next item will either be on the same | |
62 | line (if the items are short enough) or aligned with the right edge of the |
|
62 | line (if the items are short enough) or aligned with the right edge of the | |
63 | opening bracket of `MyList`. |
|
63 | opening bracket of `MyList`. | |
64 |
|
64 | |||
65 | If you just want to indent something you can use the group function |
|
65 | If you just want to indent something you can use the group function | |
66 | without open / close parameters. You can also use this code:: |
|
66 | without open / close parameters. You can also use this code:: | |
67 |
|
67 | |||
68 | with p.indent(2): |
|
68 | with p.indent(2): | |
69 | ... |
|
69 | ... | |
70 |
|
70 | |||
71 | Inheritance diagram: |
|
71 | Inheritance diagram: | |
72 |
|
72 | |||
73 | .. inheritance-diagram:: IPython.lib.pretty |
|
73 | .. inheritance-diagram:: IPython.lib.pretty | |
74 | :parts: 3 |
|
74 | :parts: 3 | |
75 |
|
75 | |||
76 | :copyright: 2007 by Armin Ronacher. |
|
76 | :copyright: 2007 by Armin Ronacher. | |
77 | Portions (c) 2009 by Robert Kern. |
|
77 | Portions (c) 2009 by Robert Kern. | |
78 | :license: BSD License. |
|
78 | :license: BSD License. | |
79 | """ |
|
79 | """ | |
80 |
|
80 | |||
81 | from contextlib import contextmanager |
|
81 | from contextlib import contextmanager | |
82 | import datetime |
|
82 | import datetime | |
83 | import os |
|
83 | import os | |
84 | import re |
|
84 | import re | |
85 | import sys |
|
85 | import sys | |
86 | import types |
|
86 | import types | |
87 | from collections import deque |
|
87 | from collections import deque | |
88 | from inspect import signature |
|
88 | from inspect import signature | |
89 | from io import StringIO |
|
89 | from io import StringIO | |
90 | from warnings import warn |
|
90 | from warnings import warn | |
91 |
|
91 | |||
92 | from IPython.utils.decorators import undoc |
|
92 | from IPython.utils.decorators import undoc | |
93 | from IPython.utils.py3compat import PYPY |
|
93 | from IPython.utils.py3compat import PYPY | |
94 |
|
94 | |||
95 | __all__ = ['pretty', 'pprint', 'PrettyPrinter', 'RepresentationPrinter', |
|
95 | __all__ = ['pretty', 'pprint', 'PrettyPrinter', 'RepresentationPrinter', | |
96 | 'for_type', 'for_type_by_name'] |
|
96 | 'for_type', 'for_type_by_name'] | |
97 |
|
97 | |||
98 |
|
98 | |||
99 | MAX_SEQ_LENGTH = 1000 |
|
99 | MAX_SEQ_LENGTH = 1000 | |
100 | _re_pattern_type = type(re.compile('')) |
|
100 | _re_pattern_type = type(re.compile('')) | |
101 |
|
101 | |||
102 | def _safe_getattr(obj, attr, default=None): |
|
102 | def _safe_getattr(obj, attr, default=None): | |
103 | """Safe version of getattr. |
|
103 | """Safe version of getattr. | |
104 |
|
104 | |||
105 | Same as getattr, but will return ``default`` on any Exception, |
|
105 | Same as getattr, but will return ``default`` on any Exception, | |
106 | rather than raising. |
|
106 | rather than raising. | |
107 | """ |
|
107 | """ | |
108 | try: |
|
108 | try: | |
109 | return getattr(obj, attr, default) |
|
109 | return getattr(obj, attr, default) | |
110 | except Exception: |
|
110 | except Exception: | |
111 | return default |
|
111 | return default | |
112 |
|
112 | |||
113 | @undoc |
|
113 | @undoc | |
114 | class CUnicodeIO(StringIO): |
|
114 | class CUnicodeIO(StringIO): | |
115 | def __init__(self, *args, **kwargs): |
|
115 | def __init__(self, *args, **kwargs): | |
116 | super().__init__(*args, **kwargs) |
|
116 | super().__init__(*args, **kwargs) | |
117 | warn(("CUnicodeIO is deprecated since IPython 6.0. " |
|
117 | warn(("CUnicodeIO is deprecated since IPython 6.0. " | |
118 | "Please use io.StringIO instead."), |
|
118 | "Please use io.StringIO instead."), | |
119 | DeprecationWarning, stacklevel=2) |
|
119 | DeprecationWarning, stacklevel=2) | |
120 |
|
120 | |||
121 | def _sorted_for_pprint(items): |
|
121 | def _sorted_for_pprint(items): | |
122 | """ |
|
122 | """ | |
123 | Sort the given items for pretty printing. Since some predictable |
|
123 | Sort the given items for pretty printing. Since some predictable | |
124 | sorting is better than no sorting at all, we sort on the string |
|
124 | sorting is better than no sorting at all, we sort on the string | |
125 | representation if normal sorting fails. |
|
125 | representation if normal sorting fails. | |
126 | """ |
|
126 | """ | |
127 | items = list(items) |
|
127 | items = list(items) | |
128 | try: |
|
128 | try: | |
129 | return sorted(items) |
|
129 | return sorted(items) | |
130 | except Exception: |
|
130 | except Exception: | |
131 | try: |
|
131 | try: | |
132 | return sorted(items, key=str) |
|
132 | return sorted(items, key=str) | |
133 | except Exception: |
|
133 | except Exception: | |
134 | return items |
|
134 | return items | |
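For instance, a collection mixing `int` and `str` keys cannot be sorted directly on Python 3,
so the fallback keeps the ordering predictable by comparing `str()` forms (the result shown in
the comment is indicative)::

    # sorted([3, 'a', 1]) raises TypeError, so the helper falls back to key=str.
    print(_sorted_for_pprint([3, 'a', 1]))   # e.g. [1, 3, 'a']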
135 |
|
135 | |||
136 | def pretty(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): |
|
136 | def pretty(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): | |
137 | """ |
|
137 | """ | |
138 | Pretty print the object's representation. |
|
138 | Pretty print the object's representation. | |
139 | """ |
|
139 | """ | |
140 | stream = StringIO() |
|
140 | stream = StringIO() | |
141 | printer = RepresentationPrinter(stream, verbose, max_width, newline, max_seq_length=max_seq_length) |
|
141 | printer = RepresentationPrinter(stream, verbose, max_width, newline, max_seq_length=max_seq_length) | |
142 | printer.pretty(obj) |
|
142 | printer.pretty(obj) | |
143 | printer.flush() |
|
143 | printer.flush() | |
144 | return stream.getvalue() |
|
144 | return stream.getvalue() | |
145 |
|
145 | |||
146 |
|
146 | |||
147 | def pprint(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): |
|
147 | def pprint(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): | |
148 | """ |
|
148 | """ | |
149 | Like `pretty` but print to stdout. |
|
149 | Like `pretty` but print to stdout. | |
150 | """ |
|
150 | """ | |
151 | printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline, max_seq_length=max_seq_length) |
|
151 | printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline, max_seq_length=max_seq_length) | |
152 | printer.pretty(obj) |
|
152 | printer.pretty(obj) | |
153 | printer.flush() |
|
153 | printer.flush() | |
154 | sys.stdout.write(newline) |
|
154 | sys.stdout.write(newline) | |
155 | sys.stdout.flush() |
|
155 | sys.stdout.flush() | |
156 |
|
156 | |||
157 | class _PrettyPrinterBase(object): |
|
157 | class _PrettyPrinterBase(object): | |
158 |
|
158 | |||
159 | @contextmanager |
|
159 | @contextmanager | |
160 | def indent(self, indent): |
|
160 | def indent(self, indent): | |
161 | """with statement support for indenting/dedenting.""" |
|
161 | """with statement support for indenting/dedenting.""" | |
162 | self.indentation += indent |
|
162 | self.indentation += indent | |
163 | try: |
|
163 | try: | |
164 | yield |
|
164 | yield | |
165 | finally: |
|
165 | finally: | |
166 | self.indentation -= indent |
|
166 | self.indentation -= indent | |
167 |
|
167 | |||
168 | @contextmanager |
|
168 | @contextmanager | |
169 | def group(self, indent=0, open='', close=''): |
|
169 | def group(self, indent=0, open='', close=''): | |
170 | """like begin_group / end_group but for the with statement.""" |
|
170 | """like begin_group / end_group but for the with statement.""" | |
171 | self.begin_group(indent, open) |
|
171 | self.begin_group(indent, open) | |
172 | try: |
|
172 | try: | |
173 | yield |
|
173 | yield | |
174 | finally: |
|
174 | finally: | |
175 | self.end_group(indent, close) |
|
175 | self.end_group(indent, close) | |
176 |
|
176 | |||
177 | class PrettyPrinter(_PrettyPrinterBase): |
|
177 | class PrettyPrinter(_PrettyPrinterBase): | |
178 | """ |
|
178 | """ | |
179 | Baseclass for the `RepresentationPrinter` prettyprinter that is used to |
|
179 | Baseclass for the `RepresentationPrinter` prettyprinter that is used to | |
180 | generate pretty reprs of objects. Contrary to the `RepresentationPrinter` |
|
180 | generate pretty reprs of objects. Contrary to the `RepresentationPrinter` | |
181 | this printer knows nothing about the default pprinters or the `_repr_pretty_` |
|
181 | this printer knows nothing about the default pprinters or the `_repr_pretty_` | |
182 | callback method. |
|
182 | callback method. | |
183 | """ |
|
183 | """ | |
184 |
|
184 | |||
185 | def __init__(self, output, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): |
|
185 | def __init__(self, output, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): | |
186 | self.output = output |
|
186 | self.output = output | |
187 | self.max_width = max_width |
|
187 | self.max_width = max_width | |
188 | self.newline = newline |
|
188 | self.newline = newline | |
189 | self.max_seq_length = max_seq_length |
|
189 | self.max_seq_length = max_seq_length | |
190 | self.output_width = 0 |
|
190 | self.output_width = 0 | |
191 | self.buffer_width = 0 |
|
191 | self.buffer_width = 0 | |
192 | self.buffer = deque() |
|
192 | self.buffer = deque() | |
193 |
|
193 | |||
194 | root_group = Group(0) |
|
194 | root_group = Group(0) | |
195 | self.group_stack = [root_group] |
|
195 | self.group_stack = [root_group] | |
196 | self.group_queue = GroupQueue(root_group) |
|
196 | self.group_queue = GroupQueue(root_group) | |
197 | self.indentation = 0 |
|
197 | self.indentation = 0 | |
198 |
|
198 | |||
199 | def _break_one_group(self, group): |
|
199 | def _break_one_group(self, group): | |
200 | while group.breakables: |
|
200 | while group.breakables: | |
201 | x = self.buffer.popleft() |
|
201 | x = self.buffer.popleft() | |
202 | self.output_width = x.output(self.output, self.output_width) |
|
202 | self.output_width = x.output(self.output, self.output_width) | |
203 | self.buffer_width -= x.width |
|
203 | self.buffer_width -= x.width | |
204 | while self.buffer and isinstance(self.buffer[0], Text): |
|
204 | while self.buffer and isinstance(self.buffer[0], Text): | |
205 | x = self.buffer.popleft() |
|
205 | x = self.buffer.popleft() | |
206 | self.output_width = x.output(self.output, self.output_width) |
|
206 | self.output_width = x.output(self.output, self.output_width) | |
207 | self.buffer_width -= x.width |
|
207 | self.buffer_width -= x.width | |
208 |
|
208 | |||
209 | def _break_outer_groups(self): |
|
209 | def _break_outer_groups(self): | |
210 | while self.max_width < self.output_width + self.buffer_width: |
|
210 | while self.max_width < self.output_width + self.buffer_width: | |
211 | group = self.group_queue.deq() |
|
211 | group = self.group_queue.deq() | |
212 | if not group: |
|
212 | if not group: | |
213 | return |
|
213 | return | |
214 | self._break_one_group(group) |
|
214 | self._break_one_group(group) | |
215 |
|
215 | |||
216 | def text(self, obj): |
|
216 | def text(self, obj): | |
217 | """Add literal text to the output.""" |
|
217 | """Add literal text to the output.""" | |
218 | width = len(obj) |
|
218 | width = len(obj) | |
219 | if self.buffer: |
|
219 | if self.buffer: | |
220 | text = self.buffer[-1] |
|
220 | text = self.buffer[-1] | |
221 | if not isinstance(text, Text): |
|
221 | if not isinstance(text, Text): | |
222 | text = Text() |
|
222 | text = Text() | |
223 | self.buffer.append(text) |
|
223 | self.buffer.append(text) | |
224 | text.add(obj, width) |
|
224 | text.add(obj, width) | |
225 | self.buffer_width += width |
|
225 | self.buffer_width += width | |
226 | self._break_outer_groups() |
|
226 | self._break_outer_groups() | |
227 | else: |
|
227 | else: | |
228 | self.output.write(obj) |
|
228 | self.output.write(obj) | |
229 | self.output_width += width |
|
229 | self.output_width += width | |
230 |
|
230 | |||
231 | def breakable(self, sep=' '): |
|
231 | def breakable(self, sep=' '): | |
232 | """ |
|
232 | """ | |
233 | Add a breakable separator to the output. This does not mean that it |
|
233 | Add a breakable separator to the output. This does not mean that it | |
234 | will automatically break here. If no breaking on this position takes |
|
234 | will automatically break here. If no breaking on this position takes | |
235 | place, the `sep` is inserted, which defaults to one space. |
|
235 | place, the `sep` is inserted, which defaults to one space. | |
236 | """ |
|
236 | """ | |
237 | width = len(sep) |
|
237 | width = len(sep) | |
238 | group = self.group_stack[-1] |
|
238 | group = self.group_stack[-1] | |
239 | if group.want_break: |
|
239 | if group.want_break: | |
240 | self.flush() |
|
240 | self.flush() | |
241 | self.output.write(self.newline) |
|
241 | self.output.write(self.newline) | |
242 | self.output.write(' ' * self.indentation) |
|
242 | self.output.write(' ' * self.indentation) | |
243 | self.output_width = self.indentation |
|
243 | self.output_width = self.indentation | |
244 | self.buffer_width = 0 |
|
244 | self.buffer_width = 0 | |
245 | else: |
|
245 | else: | |
246 | self.buffer.append(Breakable(sep, width, self)) |
|
246 | self.buffer.append(Breakable(sep, width, self)) | |
247 | self.buffer_width += width |
|
247 | self.buffer_width += width | |
248 | self._break_outer_groups() |
|
248 | self._break_outer_groups() | |
249 |
|
249 | |||
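Since the text/breakable/group machinery above is easiest to see in action, here is a minimal sketch that drives the PrettyPrinter defined in this file by hand; the word list and the 20-column width are arbitrary choices for illustration, not anything prescribed by the module.

    from io import StringIO
    from IPython.lib.pretty import PrettyPrinter

    out = StringIO()
    p = PrettyPrinter(out, max_width=20)   # narrow width so the group has to break
    p.begin_group(1, '[')
    for i, word in enumerate(['alpha', 'beta', 'gamma', 'delta']):
        if i:
            p.text(',')
            p.breakable()                  # ' ' if the line still fits, newline + indent if not
        p.text(repr(word))
    p.end_group(1, ']')
    p.flush()
    print(out.getvalue())                  # breaks after the commas once 20 columns run out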
250 | def break_(self): |
|
250 | def break_(self): | |
251 | """ |
|
251 | """ | |
252 | Explicitly insert a newline into the output, maintaining correct indentation. |
|
252 | Explicitly insert a newline into the output, maintaining correct indentation. | |
253 | """ |
|
253 | """ | |
254 | group = self.group_queue.deq() |
|
254 | group = self.group_queue.deq() | |
255 | if group: |
|
255 | if group: | |
256 | self._break_one_group(group) |
|
256 | self._break_one_group(group) | |
257 | self.flush() |
|
257 | self.flush() | |
258 | self.output.write(self.newline) |
|
258 | self.output.write(self.newline) | |
259 | self.output.write(' ' * self.indentation) |
|
259 | self.output.write(' ' * self.indentation) | |
260 | self.output_width = self.indentation |
|
260 | self.output_width = self.indentation | |
261 | self.buffer_width = 0 |
|
261 | self.buffer_width = 0 | |
262 |
|
262 | |||
263 |
|
263 | |||
264 | def begin_group(self, indent=0, open=''): |
|
264 | def begin_group(self, indent=0, open=''): | |
265 | """ |
|
265 | """ | |
266 | Begin a group. If you want support for python < 2.5 which doesn't have |
|
266 | Begin a group. | |
267 | the with statement, this is the preferred way: |
|
|||
268 |
|
||||
269 | p.begin_group(1, '{') |
|
|||
270 | ... |
|
|||
271 | p.end_group(1, '}') |
|
|||
272 |
|
||||
273 | The python 2.5 expression would be this: |
|
|||
274 |
|
||||
275 | with p.group(1, '{', '}'): |
|
|||
276 | ... |
|
|||
277 |
|
||||
278 | The first parameter specifies the indentation for the next line (usually |
|
267 | The first parameter specifies the indentation for the next line (usually | |
279 | the width of the opening text), the second the opening text. All |
|
268 | the width of the opening text), the second the opening text. All | |
280 | parameters are optional. |
|
269 | parameters are optional. | |
281 | """ |
|
270 | """ | |
282 | if open: |
|
271 | if open: | |
283 | self.text(open) |
|
272 | self.text(open) | |
284 | group = Group(self.group_stack[-1].depth + 1) |
|
273 | group = Group(self.group_stack[-1].depth + 1) | |
285 | self.group_stack.append(group) |
|
274 | self.group_stack.append(group) | |
286 | self.group_queue.enq(group) |
|
275 | self.group_queue.enq(group) | |
287 | self.indentation += indent |
|
276 | self.indentation += indent | |
288 |
|
277 | |||
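The docstring trimmed in this change used to carry a usage example; for reference, a short sketch of the explicit begin_group/end_group pairing next to the group() context manager that the rest of this file relies on (the printed payload is illustrative).

    from io import StringIO
    from IPython.lib.pretty import PrettyPrinter

    out = StringIO()
    p = PrettyPrinter(out)

    # Explicit pairing, as the removed docstring example showed:
    p.begin_group(1, '{')
    p.text("'a': 1")
    p.end_group(1, '}')
    p.breakable()

    # Equivalent context-manager form (see _repr_pprint and the collections printers below):
    with p.group(1, '{', '}'):
        p.text("'a': 1")

    p.flush()
    print(out.getvalue())                  # {'a': 1} {'a': 1}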
289 | def _enumerate(self, seq): |
|
278 | def _enumerate(self, seq): | |
290 | """like enumerate, but with an upper limit on the number of items""" |
|
279 | """like enumerate, but with an upper limit on the number of items""" | |
291 | for idx, x in enumerate(seq): |
|
280 | for idx, x in enumerate(seq): | |
292 | if self.max_seq_length and idx >= self.max_seq_length: |
|
281 | if self.max_seq_length and idx >= self.max_seq_length: | |
293 | self.text(',') |
|
282 | self.text(',') | |
294 | self.breakable() |
|
283 | self.breakable() | |
295 | self.text('...') |
|
284 | self.text('...') | |
296 | return |
|
285 | return | |
297 | yield idx, x |
|
286 | yield idx, x | |
298 |
|
287 | |||
299 | def end_group(self, dedent=0, close=''): |
|
288 | def end_group(self, dedent=0, close=''): | |
300 | """End a group. See `begin_group` for more details.""" |
|
289 | """End a group. See `begin_group` for more details.""" | |
301 | self.indentation -= dedent |
|
290 | self.indentation -= dedent | |
302 | group = self.group_stack.pop() |
|
291 | group = self.group_stack.pop() | |
303 | if not group.breakables: |
|
292 | if not group.breakables: | |
304 | self.group_queue.remove(group) |
|
293 | self.group_queue.remove(group) | |
305 | if close: |
|
294 | if close: | |
306 | self.text(close) |
|
295 | self.text(close) | |
307 |
|
296 | |||
308 | def flush(self): |
|
297 | def flush(self): | |
309 | """Flush data that is left in the buffer.""" |
|
298 | """Flush data that is left in the buffer.""" | |
310 | for data in self.buffer: |
|
299 | for data in self.buffer: | |
311 | self.output_width += data.output(self.output, self.output_width) |
|
300 | self.output_width += data.output(self.output, self.output_width) | |
312 | self.buffer.clear() |
|
301 | self.buffer.clear() | |
313 | self.buffer_width = 0 |
|
302 | self.buffer_width = 0 | |
314 |
|
303 | |||
315 |
|
304 | |||
316 | def _get_mro(obj_class): |
|
305 | def _get_mro(obj_class): | |
317 | """ Get a reasonable method resolution order of a class and its superclasses |
|
306 | """ Get a reasonable method resolution order of a class and its superclasses | |
318 | for both old-style and new-style classes. |
|
307 | for both old-style and new-style classes. | |
319 | """ |
|
308 | """ | |
320 | if not hasattr(obj_class, '__mro__'): |
|
309 | if not hasattr(obj_class, '__mro__'): | |
321 | # Old-style class. Mix in object to make a fake new-style class. |
|
310 | # Old-style class. Mix in object to make a fake new-style class. | |
322 | try: |
|
311 | try: | |
323 | obj_class = type(obj_class.__name__, (obj_class, object), {}) |
|
312 | obj_class = type(obj_class.__name__, (obj_class, object), {}) | |
324 | except TypeError: |
|
313 | except TypeError: | |
325 | # Old-style extension type that does not descend from object. |
|
314 | # Old-style extension type that does not descend from object. | |
326 | # FIXME: try to construct a more thorough MRO. |
|
315 | # FIXME: try to construct a more thorough MRO. | |
327 | mro = [obj_class] |
|
316 | mro = [obj_class] | |
328 | else: |
|
317 | else: | |
329 | mro = obj_class.__mro__[1:-1] |
|
318 | mro = obj_class.__mro__[1:-1] | |
330 | else: |
|
319 | else: | |
331 | mro = obj_class.__mro__ |
|
320 | mro = obj_class.__mro__ | |
332 | return mro |
|
321 | return mro | |
333 |
|
322 | |||
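A quick illustration of what _get_mro returns; the classes are hypothetical, and on Python 3 every class has __mro__, so the old-style branch above is effectively legacy.

    from IPython.lib.pretty import _get_mro   # module-level, though private

    class Base(object):        # illustrative classes
        pass

    class Child(Base):
        pass

    # New-style classes expose __mro__, so _get_mro returns it unchanged.
    assert _get_mro(Child) == Child.__mro__ == (Child, Base, object)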
334 |
|
323 | |||
335 | class RepresentationPrinter(PrettyPrinter): |
|
324 | class RepresentationPrinter(PrettyPrinter): | |
336 | """ |
|
325 | """ | |
337 | Special pretty printer that has a `pretty` method that calls the pretty |
|
326 | Special pretty printer that has a `pretty` method that calls the pretty | |
338 | printer for a python object. |
|
327 | printer for a python object. | |
339 |
|
328 | |||
340 | This class stores processing data on `self` so you must *never* use |
|
329 | This class stores processing data on `self` so you must *never* use | |
341 | this class in a threaded environment. Always lock it or reinstantiate |

330 | this class in a threaded environment. Always lock it or reinstantiate | |
342 | it. |
|
331 | it. | |
343 |
|
332 | |||
344 | Instances also have a verbose flag that callbacks can access to control their |

333 | Instances also have a verbose flag that callbacks can access to control their | |
345 | output. For example the default instance repr prints all attributes and |
|
334 | output. For example the default instance repr prints all attributes and | |
346 | methods that are not prefixed by an underscore if the printer is in |
|
335 | methods that are not prefixed by an underscore if the printer is in | |
347 | verbose mode. |
|
336 | verbose mode. | |
348 | """ |
|
337 | """ | |
349 |
|
338 | |||
350 | def __init__(self, output, verbose=False, max_width=79, newline='\n', |
|
339 | def __init__(self, output, verbose=False, max_width=79, newline='\n', | |
351 | singleton_pprinters=None, type_pprinters=None, deferred_pprinters=None, |
|
340 | singleton_pprinters=None, type_pprinters=None, deferred_pprinters=None, | |
352 | max_seq_length=MAX_SEQ_LENGTH): |
|
341 | max_seq_length=MAX_SEQ_LENGTH): | |
353 |
|
342 | |||
354 | PrettyPrinter.__init__(self, output, max_width, newline, max_seq_length=max_seq_length) |
|
343 | PrettyPrinter.__init__(self, output, max_width, newline, max_seq_length=max_seq_length) | |
355 | self.verbose = verbose |
|
344 | self.verbose = verbose | |
356 | self.stack = [] |
|
345 | self.stack = [] | |
357 | if singleton_pprinters is None: |
|
346 | if singleton_pprinters is None: | |
358 | singleton_pprinters = _singleton_pprinters.copy() |
|
347 | singleton_pprinters = _singleton_pprinters.copy() | |
359 | self.singleton_pprinters = singleton_pprinters |
|
348 | self.singleton_pprinters = singleton_pprinters | |
360 | if type_pprinters is None: |
|
349 | if type_pprinters is None: | |
361 | type_pprinters = _type_pprinters.copy() |
|
350 | type_pprinters = _type_pprinters.copy() | |
362 | self.type_pprinters = type_pprinters |
|
351 | self.type_pprinters = type_pprinters | |
363 | if deferred_pprinters is None: |
|
352 | if deferred_pprinters is None: | |
364 | deferred_pprinters = _deferred_type_pprinters.copy() |
|
353 | deferred_pprinters = _deferred_type_pprinters.copy() | |
365 | self.deferred_pprinters = deferred_pprinters |
|
354 | self.deferred_pprinters = deferred_pprinters | |
366 |
|
355 | |||
367 | def pretty(self, obj): |
|
356 | def pretty(self, obj): | |
368 | """Pretty print the given object.""" |
|
357 | """Pretty print the given object.""" | |
369 | obj_id = id(obj) |
|
358 | obj_id = id(obj) | |
370 | cycle = obj_id in self.stack |
|
359 | cycle = obj_id in self.stack | |
371 | self.stack.append(obj_id) |
|
360 | self.stack.append(obj_id) | |
372 | self.begin_group() |
|
361 | self.begin_group() | |
373 | try: |
|
362 | try: | |
374 | obj_class = _safe_getattr(obj, '__class__', None) or type(obj) |
|
363 | obj_class = _safe_getattr(obj, '__class__', None) or type(obj) | |
375 | # First try to find registered singleton printers for the type. |
|
364 | # First try to find registered singleton printers for the type. | |
376 | try: |
|
365 | try: | |
377 | printer = self.singleton_pprinters[obj_id] |
|
366 | printer = self.singleton_pprinters[obj_id] | |
378 | except (TypeError, KeyError): |
|
367 | except (TypeError, KeyError): | |
379 | pass |
|
368 | pass | |
380 | else: |
|
369 | else: | |
381 | return printer(obj, self, cycle) |
|
370 | return printer(obj, self, cycle) | |
382 | # Next walk the mro and check for either: |
|
371 | # Next walk the mro and check for either: | |
383 | # 1) a registered printer |
|
372 | # 1) a registered printer | |
384 | # 2) a _repr_pretty_ method |
|
373 | # 2) a _repr_pretty_ method | |
385 | for cls in _get_mro(obj_class): |
|
374 | for cls in _get_mro(obj_class): | |
386 | if cls in self.type_pprinters: |
|
375 | if cls in self.type_pprinters: | |
387 | # printer registered in self.type_pprinters |
|
376 | # printer registered in self.type_pprinters | |
388 | return self.type_pprinters[cls](obj, self, cycle) |
|
377 | return self.type_pprinters[cls](obj, self, cycle) | |
389 | else: |
|
378 | else: | |
390 | # deferred printer |
|
379 | # deferred printer | |
391 | printer = self._in_deferred_types(cls) |
|
380 | printer = self._in_deferred_types(cls) | |
392 | if printer is not None: |
|
381 | if printer is not None: | |
393 | return printer(obj, self, cycle) |
|
382 | return printer(obj, self, cycle) | |
394 | else: |
|
383 | else: | |
395 | # Finally look for special method names. |
|
384 | # Finally look for special method names. | |
396 | # Some objects automatically create any requested |
|
385 | # Some objects automatically create any requested | |
397 | # attribute. Try to ignore most of them by checking for |
|
386 | # attribute. Try to ignore most of them by checking for | |
398 | # callability. |
|
387 | # callability. | |
399 | if '_repr_pretty_' in cls.__dict__: |
|
388 | if '_repr_pretty_' in cls.__dict__: | |
400 | meth = cls._repr_pretty_ |
|
389 | meth = cls._repr_pretty_ | |
401 | if callable(meth): |
|
390 | if callable(meth): | |
402 | return meth(obj, self, cycle) |
|
391 | return meth(obj, self, cycle) | |
403 | if cls is not object \ |
|
392 | if cls is not object \ | |
404 | and callable(cls.__dict__.get('__repr__')): |
|
393 | and callable(cls.__dict__.get('__repr__')): | |
405 | return _repr_pprint(obj, self, cycle) |
|
394 | return _repr_pprint(obj, self, cycle) | |
406 |
|
395 | |||
407 | return _default_pprint(obj, self, cycle) |
|
396 | return _default_pprint(obj, self, cycle) | |
408 | finally: |
|
397 | finally: | |
409 | self.end_group() |
|
398 | self.end_group() | |
410 | self.stack.pop() |
|
399 | self.stack.pop() | |
411 |
|
400 | |||
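The lookup order above — singleton printers, registered type printers, the deferred registry, then a callable _repr_pretty_ in the class dict — is what makes the standard hook work. A minimal sketch of a class that opts in through _repr_pretty_; the Interval class and its fields are illustrative.

    from IPython.lib.pretty import pretty

    class Interval(object):
        """Illustrative class implementing the _repr_pretty_ hook."""
        def __init__(self, low, high):
            self.low, self.high = low, high

        def _repr_pretty_(self, p, cycle):
            if cycle:                       # guard against self-referential structures
                p.text('Interval(...)')
                return
            with p.group(9, 'Interval(', ')'):
                p.pretty(self.low)
                p.text(',')
                p.breakable()
                p.pretty(self.high)

    print(pretty(Interval(1, 10)))          # Interval(1, 10)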
412 | def _in_deferred_types(self, cls): |
|
401 | def _in_deferred_types(self, cls): | |
413 | """ |
|
402 | """ | |
414 | Check if the given class is specified in the deferred type registry. |
|
403 | Check if the given class is specified in the deferred type registry. | |
415 |
|
404 | |||
416 | Returns the printer from the registry if it exists, and None if the |
|
405 | Returns the printer from the registry if it exists, and None if the | |
417 | class is not in the registry. Successful matches will be moved to the |
|
406 | class is not in the registry. Successful matches will be moved to the | |
418 | regular type registry for future use. |
|
407 | regular type registry for future use. | |
419 | """ |
|
408 | """ | |
420 | mod = _safe_getattr(cls, '__module__', None) |
|
409 | mod = _safe_getattr(cls, '__module__', None) | |
421 | name = _safe_getattr(cls, '__name__', None) |
|
410 | name = _safe_getattr(cls, '__name__', None) | |
422 | key = (mod, name) |
|
411 | key = (mod, name) | |
423 | printer = None |
|
412 | printer = None | |
424 | if key in self.deferred_pprinters: |
|
413 | if key in self.deferred_pprinters: | |
425 | # Move the printer over to the regular registry. |
|
414 | # Move the printer over to the regular registry. | |
426 | printer = self.deferred_pprinters.pop(key) |
|
415 | printer = self.deferred_pprinters.pop(key) | |
427 | self.type_pprinters[cls] = printer |
|
416 | self.type_pprinters[cls] = printer | |
428 | return printer |
|
417 | return printer | |
429 |
|
418 | |||
430 |
|
419 | |||
431 | class Printable(object): |
|
420 | class Printable(object): | |
432 |
|
421 | |||
433 | def output(self, stream, output_width): |
|
422 | def output(self, stream, output_width): | |
434 | return output_width |
|
423 | return output_width | |
435 |
|
424 | |||
436 |
|
425 | |||
437 | class Text(Printable): |
|
426 | class Text(Printable): | |
438 |
|
427 | |||
439 | def __init__(self): |
|
428 | def __init__(self): | |
440 | self.objs = [] |
|
429 | self.objs = [] | |
441 | self.width = 0 |
|
430 | self.width = 0 | |
442 |
|
431 | |||
443 | def output(self, stream, output_width): |
|
432 | def output(self, stream, output_width): | |
444 | for obj in self.objs: |
|
433 | for obj in self.objs: | |
445 | stream.write(obj) |
|
434 | stream.write(obj) | |
446 | return output_width + self.width |
|
435 | return output_width + self.width | |
447 |
|
436 | |||
448 | def add(self, obj, width): |
|
437 | def add(self, obj, width): | |
449 | self.objs.append(obj) |
|
438 | self.objs.append(obj) | |
450 | self.width += width |
|
439 | self.width += width | |
451 |
|
440 | |||
452 |
|
441 | |||
453 | class Breakable(Printable): |
|
442 | class Breakable(Printable): | |
454 |
|
443 | |||
455 | def __init__(self, seq, width, pretty): |
|
444 | def __init__(self, seq, width, pretty): | |
456 | self.obj = seq |
|
445 | self.obj = seq | |
457 | self.width = width |
|
446 | self.width = width | |
458 | self.pretty = pretty |
|
447 | self.pretty = pretty | |
459 | self.indentation = pretty.indentation |
|
448 | self.indentation = pretty.indentation | |
460 | self.group = pretty.group_stack[-1] |
|
449 | self.group = pretty.group_stack[-1] | |
461 | self.group.breakables.append(self) |
|
450 | self.group.breakables.append(self) | |
462 |
|
451 | |||
463 | def output(self, stream, output_width): |
|
452 | def output(self, stream, output_width): | |
464 | self.group.breakables.popleft() |
|
453 | self.group.breakables.popleft() | |
465 | if self.group.want_break: |
|
454 | if self.group.want_break: | |
466 | stream.write(self.pretty.newline) |
|
455 | stream.write(self.pretty.newline) | |
467 | stream.write(' ' * self.indentation) |
|
456 | stream.write(' ' * self.indentation) | |
468 | return self.indentation |
|
457 | return self.indentation | |
469 | if not self.group.breakables: |
|
458 | if not self.group.breakables: | |
470 | self.pretty.group_queue.remove(self.group) |
|
459 | self.pretty.group_queue.remove(self.group) | |
471 | stream.write(self.obj) |
|
460 | stream.write(self.obj) | |
472 | return output_width + self.width |
|
461 | return output_width + self.width | |
473 |
|
462 | |||
474 |
|
463 | |||
475 | class Group(Printable): |
|
464 | class Group(Printable): | |
476 |
|
465 | |||
477 | def __init__(self, depth): |
|
466 | def __init__(self, depth): | |
478 | self.depth = depth |
|
467 | self.depth = depth | |
479 | self.breakables = deque() |
|
468 | self.breakables = deque() | |
480 | self.want_break = False |
|
469 | self.want_break = False | |
481 |
|
470 | |||
482 |
|
471 | |||
483 | class GroupQueue(object): |
|
472 | class GroupQueue(object): | |
484 |
|
473 | |||
485 | def __init__(self, *groups): |
|
474 | def __init__(self, *groups): | |
486 | self.queue = [] |
|
475 | self.queue = [] | |
487 | for group in groups: |
|
476 | for group in groups: | |
488 | self.enq(group) |
|
477 | self.enq(group) | |
489 |
|
478 | |||
490 | def enq(self, group): |
|
479 | def enq(self, group): | |
491 | depth = group.depth |
|
480 | depth = group.depth | |
492 | while depth > len(self.queue) - 1: |
|
481 | while depth > len(self.queue) - 1: | |
493 | self.queue.append([]) |
|
482 | self.queue.append([]) | |
494 | self.queue[depth].append(group) |
|
483 | self.queue[depth].append(group) | |
495 |
|
484 | |||
496 | def deq(self): |
|
485 | def deq(self): | |
497 | for stack in self.queue: |
|
486 | for stack in self.queue: | |
498 | for idx, group in enumerate(reversed(stack)): |
|
487 | for idx, group in enumerate(reversed(stack)): | |
499 | if group.breakables: |
|
488 | if group.breakables: | |
500 | del stack[idx] |
|
489 | del stack[idx] | |
501 | group.want_break = True |
|
490 | group.want_break = True | |
502 | return group |
|
491 | return group | |
503 | for group in stack: |
|
492 | for group in stack: | |
504 | group.want_break = True |
|
493 | group.want_break = True | |
505 | del stack[:] |
|
494 | del stack[:] | |
506 |
|
495 | |||
507 | def remove(self, group): |
|
496 | def remove(self, group): | |
508 | try: |
|
497 | try: | |
509 | self.queue[group.depth].remove(group) |
|
498 | self.queue[group.depth].remove(group) | |
510 | except ValueError: |
|
499 | except ValueError: | |
511 | pass |
|
500 | pass | |
512 |
|
501 | |||
513 |
|
502 | |||
514 | def _default_pprint(obj, p, cycle): |
|
503 | def _default_pprint(obj, p, cycle): | |
515 | """ |
|
504 | """ | |
516 | The default print function. Used if an object does not provide one and |
|
505 | The default print function. Used if an object does not provide one and | |
517 | it is not one of the builtin objects. |

506 | it is not one of the builtin objects. | |
518 | """ |
|
507 | """ | |
519 | klass = _safe_getattr(obj, '__class__', None) or type(obj) |
|
508 | klass = _safe_getattr(obj, '__class__', None) or type(obj) | |
520 | if _safe_getattr(klass, '__repr__', None) is not object.__repr__: |
|
509 | if _safe_getattr(klass, '__repr__', None) is not object.__repr__: | |
521 | # A user-provided repr. Find newlines and replace them with p.break_() |
|
510 | # A user-provided repr. Find newlines and replace them with p.break_() | |
522 | _repr_pprint(obj, p, cycle) |
|
511 | _repr_pprint(obj, p, cycle) | |
523 | return |
|
512 | return | |
524 | p.begin_group(1, '<') |
|
513 | p.begin_group(1, '<') | |
525 | p.pretty(klass) |
|
514 | p.pretty(klass) | |
526 | p.text(' at 0x%x' % id(obj)) |
|
515 | p.text(' at 0x%x' % id(obj)) | |
527 | if cycle: |
|
516 | if cycle: | |
528 | p.text(' ...') |
|
517 | p.text(' ...') | |
529 | elif p.verbose: |
|
518 | elif p.verbose: | |
530 | first = True |
|
519 | first = True | |
531 | for key in dir(obj): |
|
520 | for key in dir(obj): | |
532 | if not key.startswith('_'): |
|
521 | if not key.startswith('_'): | |
533 | try: |
|
522 | try: | |
534 | value = getattr(obj, key) |
|
523 | value = getattr(obj, key) | |
535 | except AttributeError: |
|
524 | except AttributeError: | |
536 | continue |
|
525 | continue | |
537 | if isinstance(value, types.MethodType): |
|
526 | if isinstance(value, types.MethodType): | |
538 | continue |
|
527 | continue | |
539 | if not first: |
|
528 | if not first: | |
540 | p.text(',') |
|
529 | p.text(',') | |
541 | p.breakable() |
|
530 | p.breakable() | |
542 | p.text(key) |
|
531 | p.text(key) | |
543 | p.text('=') |
|
532 | p.text('=') | |
544 | step = len(key) + 1 |
|
533 | step = len(key) + 1 | |
545 | p.indentation += step |
|
534 | p.indentation += step | |
546 | p.pretty(value) |
|
535 | p.pretty(value) | |
547 | p.indentation -= step |
|
536 | p.indentation -= step | |
548 | first = False |
|
537 | first = False | |
549 | p.end_group(1, '>') |
|
538 | p.end_group(1, '>') | |
550 |
|
539 | |||
551 |
|
540 | |||
552 | def _seq_pprinter_factory(start, end): |
|
541 | def _seq_pprinter_factory(start, end): | |
553 | """ |
|
542 | """ | |
554 | Factory that returns a pprint function useful for sequences. Used by |
|
543 | Factory that returns a pprint function useful for sequences. Used by | |
555 | the default pprint for tuples, dicts, and lists. |
|
544 | the default pprint for tuples, dicts, and lists. | |
556 | """ |
|
545 | """ | |
557 | def inner(obj, p, cycle): |
|
546 | def inner(obj, p, cycle): | |
558 | if cycle: |
|
547 | if cycle: | |
559 | return p.text(start + '...' + end) |
|
548 | return p.text(start + '...' + end) | |
560 | step = len(start) |
|
549 | step = len(start) | |
561 | p.begin_group(step, start) |
|
550 | p.begin_group(step, start) | |
562 | for idx, x in p._enumerate(obj): |
|
551 | for idx, x in p._enumerate(obj): | |
563 | if idx: |
|
552 | if idx: | |
564 | p.text(',') |
|
553 | p.text(',') | |
565 | p.breakable() |
|
554 | p.breakable() | |
566 | p.pretty(x) |
|
555 | p.pretty(x) | |
567 | if len(obj) == 1 and type(obj) is tuple: |
|
556 | if len(obj) == 1 and type(obj) is tuple: | |
568 | # Special case for 1-item tuples. |
|
557 | # Special case for 1-item tuples. | |
569 | p.text(',') |
|
558 | p.text(',') | |
570 | p.end_group(step, end) |
|
559 | p.end_group(step, end) | |
571 | return inner |
|
560 | return inner | |
572 |
|
561 | |||
573 |
|
562 | |||
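Indicatively, this is the behaviour the factory produces for the builtin sequences registered further down, assuming the module-level pretty() helper and its max_seq_length argument; outputs are shown as comments.

    from IPython.lib.pretty import pretty

    print(pretty((1,)))                               # '(1,)'  -- 1-tuples keep their comma
    print(pretty([1, 2, 3]))                          # '[1, 2, 3]'
    print(pretty(list(range(10)), max_seq_length=3))  # '[0, 1, 2, ...]'  -- truncated by _enumerate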
574 | def _set_pprinter_factory(start, end): |
|
563 | def _set_pprinter_factory(start, end): | |
575 | """ |
|
564 | """ | |
576 | Factory that returns a pprint function useful for sets and frozensets. |
|
565 | Factory that returns a pprint function useful for sets and frozensets. | |
577 | """ |
|
566 | """ | |
578 | def inner(obj, p, cycle): |
|
567 | def inner(obj, p, cycle): | |
579 | if cycle: |
|
568 | if cycle: | |
580 | return p.text(start + '...' + end) |
|
569 | return p.text(start + '...' + end) | |
581 | if len(obj) == 0: |
|
570 | if len(obj) == 0: | |
582 | # Special case. |
|
571 | # Special case. | |
583 | p.text(type(obj).__name__ + '()') |
|
572 | p.text(type(obj).__name__ + '()') | |
584 | else: |
|
573 | else: | |
585 | step = len(start) |
|
574 | step = len(start) | |
586 | p.begin_group(step, start) |
|
575 | p.begin_group(step, start) | |
587 | # Like dictionary keys, we will try to sort the items if there aren't too many |
|
576 | # Like dictionary keys, we will try to sort the items if there aren't too many | |
588 | if not (p.max_seq_length and len(obj) >= p.max_seq_length): |
|
577 | if not (p.max_seq_length and len(obj) >= p.max_seq_length): | |
589 | items = _sorted_for_pprint(obj) |
|
578 | items = _sorted_for_pprint(obj) | |
590 | else: |
|
579 | else: | |
591 | items = obj |
|
580 | items = obj | |
592 | for idx, x in p._enumerate(items): |
|
581 | for idx, x in p._enumerate(items): | |
593 | if idx: |
|
582 | if idx: | |
594 | p.text(',') |
|
583 | p.text(',') | |
595 | p.breakable() |
|
584 | p.breakable() | |
596 | p.pretty(x) |
|
585 | p.pretty(x) | |
597 | p.end_group(step, end) |
|
586 | p.end_group(step, end) | |
598 | return inner |
|
587 | return inner | |
599 |
|
588 | |||
600 |
|
589 | |||
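Likewise for sets: small sets are sorted for stable output, the empty set falls back to the constructor form, and frozensets pick up the prefix registered below (outputs indicative).

    from IPython.lib.pretty import pretty

    print(pretty({3, 1, 2}))            # '{1, 2, 3}'   (sorted while the set is small)
    print(pretty(set()))                # 'set()'       (the len(obj) == 0 special case)
    print(pretty(frozenset({1, 2})))    # 'frozenset({1, 2})'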
601 | def _dict_pprinter_factory(start, end): |
|
590 | def _dict_pprinter_factory(start, end): | |
602 | """ |
|
591 | """ | |
603 | Factory that returns a pprint function used by the default pprint of |
|
592 | Factory that returns a pprint function used by the default pprint of | |
604 | dicts and dict proxies. |
|
593 | dicts and dict proxies. | |
605 | """ |
|
594 | """ | |
606 | def inner(obj, p, cycle): |
|
595 | def inner(obj, p, cycle): | |
607 | if cycle: |
|
596 | if cycle: | |
608 | return p.text('{...}') |
|
597 | return p.text('{...}') | |
609 | step = len(start) |
|
598 | step = len(start) | |
610 | p.begin_group(step, start) |
|
599 | p.begin_group(step, start) | |
611 | keys = obj.keys() |
|
600 | keys = obj.keys() | |
612 | for idx, key in p._enumerate(keys): |
|
601 | for idx, key in p._enumerate(keys): | |
613 | if idx: |
|
602 | if idx: | |
614 | p.text(',') |
|
603 | p.text(',') | |
615 | p.breakable() |
|
604 | p.breakable() | |
616 | p.pretty(key) |
|
605 | p.pretty(key) | |
617 | p.text(': ') |
|
606 | p.text(': ') | |
618 | p.pretty(obj[key]) |
|
607 | p.pretty(obj[key]) | |
619 | p.end_group(step, end) |
|
608 | p.end_group(step, end) | |
620 | return inner |
|
609 | return inner | |
621 |
|
610 | |||
622 |
|
611 | |||
623 | def _super_pprint(obj, p, cycle): |
|
612 | def _super_pprint(obj, p, cycle): | |
624 | """The pprint for the super type.""" |
|
613 | """The pprint for the super type.""" | |
625 | p.begin_group(8, '<super: ') |
|
614 | p.begin_group(8, '<super: ') | |
626 | p.pretty(obj.__thisclass__) |
|
615 | p.pretty(obj.__thisclass__) | |
627 | p.text(',') |
|
616 | p.text(',') | |
628 | p.breakable() |
|
617 | p.breakable() | |
629 | if PYPY: # In PyPy, super() objects don't have __self__ attributes |
|
618 | if PYPY: # In PyPy, super() objects don't have __self__ attributes | |
630 | dself = obj.__repr__.__self__ |
|
619 | dself = obj.__repr__.__self__ | |
631 | p.pretty(None if dself is obj else dself) |
|
620 | p.pretty(None if dself is obj else dself) | |
632 | else: |
|
621 | else: | |
633 | p.pretty(obj.__self__) |
|
622 | p.pretty(obj.__self__) | |
634 | p.end_group(8, '>') |
|
623 | p.end_group(8, '>') | |
635 |
|
624 | |||
636 |
|
625 | |||
637 | def _re_pattern_pprint(obj, p, cycle): |
|
626 | def _re_pattern_pprint(obj, p, cycle): | |
638 | """The pprint function for regular expression patterns.""" |
|
627 | """The pprint function for regular expression patterns.""" | |
639 | p.text('re.compile(') |
|
628 | p.text('re.compile(') | |
640 | pattern = repr(obj.pattern) |
|
629 | pattern = repr(obj.pattern) | |
641 | if pattern[:1] in 'uU': |
|
630 | if pattern[:1] in 'uU': | |
642 | pattern = pattern[1:] |
|
631 | pattern = pattern[1:] | |
643 | prefix = 'ur' |
|
632 | prefix = 'ur' | |
644 | else: |
|
633 | else: | |
645 | prefix = 'r' |
|
634 | prefix = 'r' | |
646 | pattern = prefix + pattern.replace('\\\\', '\\') |
|
635 | pattern = prefix + pattern.replace('\\\\', '\\') | |
647 | p.text(pattern) |
|
636 | p.text(pattern) | |
648 | if obj.flags: |
|
637 | if obj.flags: | |
649 | p.text(',') |
|
638 | p.text(',') | |
650 | p.breakable() |
|
639 | p.breakable() | |
651 | done_one = False |
|
640 | done_one = False | |
652 | for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL', |
|
641 | for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL', | |
653 | 'UNICODE', 'VERBOSE', 'DEBUG'): |
|
642 | 'UNICODE', 'VERBOSE', 'DEBUG'): | |
654 | if obj.flags & getattr(re, flag): |
|
643 | if obj.flags & getattr(re, flag): | |
655 | if done_one: |
|
644 | if done_one: | |
656 | p.text('|') |
|
645 | p.text('|') | |
657 | p.text('re.' + flag) |
|
646 | p.text('re.' + flag) | |
658 | done_one = True |
|
647 | done_one = True | |
659 | p.text(')') |
|
648 | p.text(')') | |
660 |
|
649 | |||
661 |
|
650 | |||
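An indicative example of the resulting repr; note that on Python 3 a str pattern implicitly carries re.UNICODE, so that flag is printed as well.

    import re
    from IPython.lib.pretty import pretty

    pat = re.compile(r'\d+', re.IGNORECASE)
    print(pretty(pat))                  # re.compile(r'\d+', re.IGNORECASE|re.UNICODE)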
662 | def _type_pprint(obj, p, cycle): |
|
651 | def _type_pprint(obj, p, cycle): | |
663 | """The pprint for classes and types.""" |
|
652 | """The pprint for classes and types.""" | |
664 | # Heap allocated types might not have the module attribute, |
|
653 | # Heap allocated types might not have the module attribute, | |
665 | # and others may set it to None. |
|
654 | # and others may set it to None. | |
666 |
|
655 | |||
667 | # Checks for a __repr__ override in the metaclass. Can't compare the |
|
656 | # Checks for a __repr__ override in the metaclass. Can't compare the | |
668 | # type(obj).__repr__ directly because in PyPy the representation function |
|
657 | # type(obj).__repr__ directly because in PyPy the representation function | |
669 | # inherited from type isn't the same type.__repr__ |
|
658 | # inherited from type isn't the same type.__repr__ | |
670 | if [m for m in _get_mro(type(obj)) if "__repr__" in vars(m)][:1] != [type]: |
|
659 | if [m for m in _get_mro(type(obj)) if "__repr__" in vars(m)][:1] != [type]: | |
671 | _repr_pprint(obj, p, cycle) |
|
660 | _repr_pprint(obj, p, cycle) | |
672 | return |
|
661 | return | |
673 |
|
662 | |||
674 | mod = _safe_getattr(obj, '__module__', None) |
|
663 | mod = _safe_getattr(obj, '__module__', None) | |
675 | try: |
|
664 | try: | |
676 | name = obj.__qualname__ |
|
665 | name = obj.__qualname__ | |
677 | if not isinstance(name, str): |
|
666 | if not isinstance(name, str): | |
678 | # This can happen if the type implements __qualname__ as a property |
|
667 | # This can happen if the type implements __qualname__ as a property | |
679 | # or other descriptor in Python 2. |
|
668 | # or other descriptor in Python 2. | |
680 | raise Exception("Try __name__") |
|
669 | raise Exception("Try __name__") | |
681 | except Exception: |
|
670 | except Exception: | |
682 | name = obj.__name__ |
|
671 | name = obj.__name__ | |
683 | if not isinstance(name, str): |
|
672 | if not isinstance(name, str): | |
684 | name = '<unknown type>' |
|
673 | name = '<unknown type>' | |
685 |
|
674 | |||
686 | if mod in (None, '__builtin__', 'builtins', 'exceptions'): |
|
675 | if mod in (None, '__builtin__', 'builtins', 'exceptions'): | |
687 | p.text(name) |
|
676 | p.text(name) | |
688 | else: |
|
677 | else: | |
689 | p.text(mod + '.' + name) |
|
678 | p.text(mod + '.' + name) | |
690 |
|
679 | |||
691 |
|
680 | |||
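Indicatively, builtin classes print bare while everything else is qualified with its module.

    import collections
    from IPython.lib.pretty import pretty

    print(pretty(dict))                     # 'dict'
    print(pretty(collections.OrderedDict))  # 'collections.OrderedDict'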
692 | def _repr_pprint(obj, p, cycle): |
|
681 | def _repr_pprint(obj, p, cycle): | |
693 | """A pprint that just redirects to the normal repr function.""" |
|
682 | """A pprint that just redirects to the normal repr function.""" | |
694 | # Find newlines and replace them with p.break_() |
|
683 | # Find newlines and replace them with p.break_() | |
695 | output = repr(obj) |
|
684 | output = repr(obj) | |
696 | lines = output.splitlines() |
|
685 | lines = output.splitlines() | |
697 | with p.group(): |
|
686 | with p.group(): | |
698 | for idx, output_line in enumerate(lines): |
|
687 | for idx, output_line in enumerate(lines): | |
699 | if idx: |
|
688 | if idx: | |
700 | p.break_() |
|
689 | p.break_() | |
701 | p.text(output_line) |
|
690 | p.text(output_line) | |
702 |
|
691 | |||
703 |
|
692 | |||
704 | def _function_pprint(obj, p, cycle): |
|
693 | def _function_pprint(obj, p, cycle): | |
705 | """Base pprint for all functions and builtin functions.""" |
|
694 | """Base pprint for all functions and builtin functions.""" | |
706 | name = _safe_getattr(obj, '__qualname__', obj.__name__) |
|
695 | name = _safe_getattr(obj, '__qualname__', obj.__name__) | |
707 | mod = obj.__module__ |
|
696 | mod = obj.__module__ | |
708 | if mod and mod not in ('__builtin__', 'builtins', 'exceptions'): |
|
697 | if mod and mod not in ('__builtin__', 'builtins', 'exceptions'): | |
709 | name = mod + '.' + name |
|
698 | name = mod + '.' + name | |
710 | try: |
|
699 | try: | |
711 | func_def = name + str(signature(obj)) |
|
700 | func_def = name + str(signature(obj)) | |
712 | except ValueError: |
|
701 | except ValueError: | |
713 | func_def = name |
|
702 | func_def = name | |
714 | p.text('<function %s>' % func_def) |
|
703 | p.text('<function %s>' % func_def) | |
715 |
|
704 | |||
716 |
|
705 | |||
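Indicatively, the signature is appended whenever inspect.signature can produce one; the sample function is illustrative, and the module prefix depends on where the code runs.

    from IPython.lib.pretty import pretty

    def scale(x, factor=2):         # illustrative function
        return x * factor

    print(pretty(scale))            # e.g. '<function __main__.scale(x, factor=2)>'
    print(pretty(len))              # builtins stay bare, e.g. '<function len(obj, /)>'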
717 | def _exception_pprint(obj, p, cycle): |
|
706 | def _exception_pprint(obj, p, cycle): | |
718 | """Base pprint for all exceptions.""" |
|
707 | """Base pprint for all exceptions.""" | |
719 | name = getattr(obj.__class__, '__qualname__', obj.__class__.__name__) |
|
708 | name = getattr(obj.__class__, '__qualname__', obj.__class__.__name__) | |
720 | if obj.__class__.__module__ not in ('exceptions', 'builtins'): |
|
709 | if obj.__class__.__module__ not in ('exceptions', 'builtins'): | |
721 | name = '%s.%s' % (obj.__class__.__module__, name) |
|
710 | name = '%s.%s' % (obj.__class__.__module__, name) | |
722 | step = len(name) + 1 |
|
711 | step = len(name) + 1 | |
723 | p.begin_group(step, name + '(') |
|
712 | p.begin_group(step, name + '(') | |
724 | for idx, arg in enumerate(getattr(obj, 'args', ())): |
|
713 | for idx, arg in enumerate(getattr(obj, 'args', ())): | |
725 | if idx: |
|
714 | if idx: | |
726 | p.text(',') |
|
715 | p.text(',') | |
727 | p.breakable() |
|
716 | p.breakable() | |
728 | p.pretty(arg) |
|
717 | p.pretty(arg) | |
729 | p.end_group(step, ')') |
|
718 | p.end_group(step, ')') | |
730 |
|
719 | |||
731 |
|
720 | |||
732 | #: the exception base |
|
721 | #: the exception base | |
733 | try: |
|
722 | try: | |
734 | _exception_base = BaseException |
|
723 | _exception_base = BaseException | |
735 | except NameError: |
|
724 | except NameError: | |
736 | _exception_base = Exception |
|
725 | _exception_base = Exception | |
737 |
|
726 | |||
738 |
|
727 | |||
739 | #: printers for builtin types |
|
728 | #: printers for builtin types | |
740 | _type_pprinters = { |
|
729 | _type_pprinters = { | |
741 | int: _repr_pprint, |
|
730 | int: _repr_pprint, | |
742 | float: _repr_pprint, |
|
731 | float: _repr_pprint, | |
743 | str: _repr_pprint, |
|
732 | str: _repr_pprint, | |
744 | tuple: _seq_pprinter_factory('(', ')'), |
|
733 | tuple: _seq_pprinter_factory('(', ')'), | |
745 | list: _seq_pprinter_factory('[', ']'), |
|
734 | list: _seq_pprinter_factory('[', ']'), | |
746 | dict: _dict_pprinter_factory('{', '}'), |
|
735 | dict: _dict_pprinter_factory('{', '}'), | |
747 | set: _set_pprinter_factory('{', '}'), |
|
736 | set: _set_pprinter_factory('{', '}'), | |
748 | frozenset: _set_pprinter_factory('frozenset({', '})'), |
|
737 | frozenset: _set_pprinter_factory('frozenset({', '})'), | |
749 | super: _super_pprint, |
|
738 | super: _super_pprint, | |
750 | _re_pattern_type: _re_pattern_pprint, |
|
739 | _re_pattern_type: _re_pattern_pprint, | |
751 | type: _type_pprint, |
|
740 | type: _type_pprint, | |
752 | types.FunctionType: _function_pprint, |
|
741 | types.FunctionType: _function_pprint, | |
753 | types.BuiltinFunctionType: _function_pprint, |
|
742 | types.BuiltinFunctionType: _function_pprint, | |
754 | types.MethodType: _repr_pprint, |
|
743 | types.MethodType: _repr_pprint, | |
755 | datetime.datetime: _repr_pprint, |
|
744 | datetime.datetime: _repr_pprint, | |
756 | datetime.timedelta: _repr_pprint, |
|
745 | datetime.timedelta: _repr_pprint, | |
757 | _exception_base: _exception_pprint |
|
746 | _exception_base: _exception_pprint | |
758 | } |
|
747 | } | |
759 |
|
748 | |||
760 | # render os.environ like a dict |
|
749 | # render os.environ like a dict | |
761 | _env_type = type(os.environ) |
|
750 | _env_type = type(os.environ) | |
762 | # future-proof in case os.environ becomes a plain dict? |
|
751 | # future-proof in case os.environ becomes a plain dict? | |
763 | if _env_type is not dict: |
|
752 | if _env_type is not dict: | |
764 | _type_pprinters[_env_type] = _dict_pprinter_factory('environ{', '}') |
|
753 | _type_pprinters[_env_type] = _dict_pprinter_factory('environ{', '}') | |
765 |
|
754 | |||
766 | try: |
|
755 | try: | |
767 | # In PyPy, types.DictProxyType is dict, setting the dictproxy printer |
|
756 | # In PyPy, types.DictProxyType is dict, setting the dictproxy printer | |
768 | # using dict.setdefault avoids overwriting the dict printer |
|
757 | # using dict.setdefault avoids overwriting the dict printer | |
769 | _type_pprinters.setdefault(types.DictProxyType, |
|
758 | _type_pprinters.setdefault(types.DictProxyType, | |
770 | _dict_pprinter_factory('dict_proxy({', '})')) |
|
759 | _dict_pprinter_factory('dict_proxy({', '})')) | |
771 | _type_pprinters[types.ClassType] = _type_pprint |
|
760 | _type_pprinters[types.ClassType] = _type_pprint | |
772 | _type_pprinters[types.SliceType] = _repr_pprint |
|
761 | _type_pprinters[types.SliceType] = _repr_pprint | |
773 | except AttributeError: # Python 3 |
|
762 | except AttributeError: # Python 3 | |
774 | _type_pprinters[types.MappingProxyType] = \ |
|
763 | _type_pprinters[types.MappingProxyType] = \ | |
775 | _dict_pprinter_factory('mappingproxy({', '})') |
|
764 | _dict_pprinter_factory('mappingproxy({', '})') | |
776 | _type_pprinters[slice] = _repr_pprint |
|
765 | _type_pprinters[slice] = _repr_pprint | |
777 |
|
766 | |||
778 | try: |
|
767 | _type_pprinters[range] = _repr_pprint | |
779 |
|
|
768 | _type_pprinters[bytes] = _repr_pprint | |
780 | _type_pprinters[unicode] = _repr_pprint |
|
|||
781 | except NameError: |
|
|||
782 | _type_pprinters[range] = _repr_pprint |
|
|||
783 | _type_pprinters[bytes] = _repr_pprint |
|
|||
784 |
|
769 | |||
785 | #: printers for types specified by name |
|
770 | #: printers for types specified by name | |
786 | _deferred_type_pprinters = { |
|
771 | _deferred_type_pprinters = { | |
787 | } |
|
772 | } | |
788 |
|
773 | |||
789 | def for_type(typ, func): |
|
774 | def for_type(typ, func): | |
790 | """ |
|
775 | """ | |
791 | Add a pretty printer for a given type. |
|
776 | Add a pretty printer for a given type. | |
792 | """ |
|
777 | """ | |
793 | oldfunc = _type_pprinters.get(typ, None) |
|
778 | oldfunc = _type_pprinters.get(typ, None) | |
794 | if func is not None: |
|
779 | if func is not None: | |
795 | # To support easy restoration of old pprinters, we need to ignore Nones. |
|
780 | # To support easy restoration of old pprinters, we need to ignore Nones. | |
796 | _type_pprinters[typ] = func |
|
781 | _type_pprinters[typ] = func | |
797 | return oldfunc |
|
782 | return oldfunc | |
798 |
|
783 | |||
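A sketch of registering a printer for a type you control; the Money class and its printer are illustrative.

    from IPython.lib.pretty import for_type, pretty

    class Money(object):                       # illustrative type
        def __init__(self, amount, currency):
            self.amount, self.currency = amount, currency

    def _money_pprint(obj, p, cycle):
        # Printers take (object, printer, cycle flag), like every printer in this file.
        p.text('%s %.2f' % (obj.currency, obj.amount))

    previous = for_type(Money, _money_pprint)  # returns whatever was registered before, if anything
    print(pretty(Money(9.5, 'EUR')))           # 'EUR 9.50'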
799 | def for_type_by_name(type_module, type_name, func): |
|
784 | def for_type_by_name(type_module, type_name, func): | |
800 | """ |
|
785 | """ | |
801 | Add a pretty printer for a type specified by the module and name of a type |
|
786 | Add a pretty printer for a type specified by the module and name of a type | |
802 | rather than the type object itself. |
|
787 | rather than the type object itself. | |
803 | """ |
|
788 | """ | |
804 | key = (type_module, type_name) |
|
789 | key = (type_module, type_name) | |
805 | oldfunc = _deferred_type_pprinters.get(key, None) |
|
790 | oldfunc = _deferred_type_pprinters.get(key, None) | |
806 | if func is not None: |
|
791 | if func is not None: | |
807 | # To support easy restoration of old pprinters, we need to ignore Nones. |
|
792 | # To support easy restoration of old pprinters, we need to ignore Nones. | |
808 | _deferred_type_pprinters[key] = func |
|
793 | _deferred_type_pprinters[key] = func | |
809 | return oldfunc |
|
794 | return oldfunc | |
810 |
|
795 | |||
811 |
|
796 | |||
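The deferred registry is the same idea keyed by (module, name), so the target module never has to be imported just to register a printer; a sketch with an illustrative printer body.

    from IPython.lib.pretty import for_type_by_name, pretty

    def _decimal_pprint(obj, p, cycle):
        p.text('Decimal<%s>' % obj)            # illustrative formatting

    # Registered by name only -- decimal is not imported at registration time.
    for_type_by_name('decimal', 'Decimal', _decimal_pprint)

    import decimal
    print(pretty(decimal.Decimal('1.25')))     # 'Decimal<1.25>'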
812 | #: printers for the default singletons |
|
797 | #: printers for the default singletons | |
813 | _singleton_pprinters = dict.fromkeys(map(id, [None, True, False, Ellipsis, |
|
798 | _singleton_pprinters = dict.fromkeys(map(id, [None, True, False, Ellipsis, | |
814 | NotImplemented]), _repr_pprint) |
|
799 | NotImplemented]), _repr_pprint) | |
815 |
|
800 | |||
816 |
|
801 | |||
817 | def _defaultdict_pprint(obj, p, cycle): |
|
802 | def _defaultdict_pprint(obj, p, cycle): | |
818 | name = obj.__class__.__name__ |
|
803 | name = obj.__class__.__name__ | |
819 | with p.group(len(name) + 1, name + '(', ')'): |
|
804 | with p.group(len(name) + 1, name + '(', ')'): | |
820 | if cycle: |
|
805 | if cycle: | |
821 | p.text('...') |
|
806 | p.text('...') | |
822 | else: |
|
807 | else: | |
823 | p.pretty(obj.default_factory) |
|
808 | p.pretty(obj.default_factory) | |
824 | p.text(',') |
|
809 | p.text(',') | |
825 | p.breakable() |
|
810 | p.breakable() | |
826 | p.pretty(dict(obj)) |
|
811 | p.pretty(dict(obj)) | |
827 |
|
812 | |||
828 | def _ordereddict_pprint(obj, p, cycle): |
|
813 | def _ordereddict_pprint(obj, p, cycle): | |
829 | name = obj.__class__.__name__ |
|
814 | name = obj.__class__.__name__ | |
830 | with p.group(len(name) + 1, name + '(', ')'): |
|
815 | with p.group(len(name) + 1, name + '(', ')'): | |
831 | if cycle: |
|
816 | if cycle: | |
832 | p.text('...') |
|
817 | p.text('...') | |
833 | elif len(obj): |
|
818 | elif len(obj): | |
834 | p.pretty(list(obj.items())) |
|
819 | p.pretty(list(obj.items())) | |
835 |
|
820 | |||
836 | def _deque_pprint(obj, p, cycle): |
|
821 | def _deque_pprint(obj, p, cycle): | |
837 | name = obj.__class__.__name__ |
|
822 | name = obj.__class__.__name__ | |
838 | with p.group(len(name) + 1, name + '(', ')'): |
|
823 | with p.group(len(name) + 1, name + '(', ')'): | |
839 | if cycle: |
|
824 | if cycle: | |
840 | p.text('...') |
|
825 | p.text('...') | |
841 | else: |
|
826 | else: | |
842 | p.pretty(list(obj)) |
|
827 | p.pretty(list(obj)) | |
843 |
|
828 | |||
844 |
|
829 | |||
845 | def _counter_pprint(obj, p, cycle): |
|
830 | def _counter_pprint(obj, p, cycle): | |
846 | name = obj.__class__.__name__ |
|
831 | name = obj.__class__.__name__ | |
847 | with p.group(len(name) + 1, name + '(', ')'): |
|
832 | with p.group(len(name) + 1, name + '(', ')'): | |
848 | if cycle: |
|
833 | if cycle: | |
849 | p.text('...') |
|
834 | p.text('...') | |
850 | elif len(obj): |
|
835 | elif len(obj): | |
851 | p.pretty(dict(obj)) |
|
836 | p.pretty(dict(obj)) | |
852 |
|
837 | |||
853 | for_type_by_name('collections', 'defaultdict', _defaultdict_pprint) |
|
838 | for_type_by_name('collections', 'defaultdict', _defaultdict_pprint) | |
854 | for_type_by_name('collections', 'OrderedDict', _ordereddict_pprint) |
|
839 | for_type_by_name('collections', 'OrderedDict', _ordereddict_pprint) | |
855 | for_type_by_name('collections', 'deque', _deque_pprint) |
|
840 | for_type_by_name('collections', 'deque', _deque_pprint) | |
856 | for_type_by_name('collections', 'Counter', _counter_pprint) |
|
841 | for_type_by_name('collections', 'Counter', _counter_pprint) | |
857 |
|
842 | |||
858 | if __name__ == '__main__': |
|
843 | if __name__ == '__main__': | |
859 | from random import randrange |
|
844 | from random import randrange | |
860 | class Foo(object): |
|
845 | class Foo(object): | |
861 | def __init__(self): |
|
846 | def __init__(self): | |
862 | self.foo = 1 |
|
847 | self.foo = 1 | |
863 | self.bar = re.compile(r'\s+') |
|
848 | self.bar = re.compile(r'\s+') | |
864 | self.blub = dict.fromkeys(range(30), randrange(1, 40)) |
|
849 | self.blub = dict.fromkeys(range(30), randrange(1, 40)) | |
865 | self.hehe = 23424.234234 |
|
850 | self.hehe = 23424.234234 | |
866 | self.list = ["blub", "blah", self] |
|
851 | self.list = ["blub", "blah", self] | |
867 |
|
852 | |||
868 | def get_foo(self): |
|
853 | def get_foo(self): | |
869 | print("foo") |
|
854 | print("foo") | |
870 |
|
855 | |||
871 | pprint(Foo(), verbose=True) |
|
856 | pprint(Foo(), verbose=True) |
@@ -1,270 +1,266 b'' | |||||
1 | """Tests for IPython.lib.display. |
|
1 | """Tests for IPython.lib.display. | |
2 |
|
2 | |||
3 | """ |
|
3 | """ | |
4 | #----------------------------------------------------------------------------- |
|
4 | #----------------------------------------------------------------------------- | |
5 | # Copyright (c) 2012, the IPython Development Team. |
|
5 | # Copyright (c) 2012, the IPython Development Team. | |
6 | # |
|
6 | # | |
7 | # Distributed under the terms of the Modified BSD License. |
|
7 | # Distributed under the terms of the Modified BSD License. | |
8 | # |
|
8 | # | |
9 | # The full license is in the file COPYING.txt, distributed with this software. |
|
9 | # The full license is in the file COPYING.txt, distributed with this software. | |
10 | #----------------------------------------------------------------------------- |
|
10 | #----------------------------------------------------------------------------- | |
11 |
|
11 | |||
12 | #----------------------------------------------------------------------------- |
|
12 | #----------------------------------------------------------------------------- | |
13 | # Imports |
|
13 | # Imports | |
14 | #----------------------------------------------------------------------------- |
|
14 | #----------------------------------------------------------------------------- | |
15 | from tempfile import NamedTemporaryFile, mkdtemp |
|
15 | from tempfile import NamedTemporaryFile, mkdtemp | |
16 | from os.path import split, join as pjoin, dirname |
|
16 | from os.path import split, join as pjoin, dirname | |
17 | import sys |
|
17 | import pathlib | |
18 | try: |
|
|||
19 | import pathlib |
|
|||
20 | except ImportError: |
|
|||
21 | pass |
|
|||
22 | from unittest import TestCase, mock |
|
18 | from unittest import TestCase, mock | |
23 | import struct |
|
19 | import struct | |
24 | import wave |
|
20 | import wave | |
25 | from io import BytesIO |
|
21 | from io import BytesIO | |
26 |
|
22 | |||
27 | # Third-party imports |
|
23 | # Third-party imports | |
28 | import nose.tools as nt |
|
24 | import nose.tools as nt | |
29 |
|
25 | |||
30 | try: |
|
26 | try: | |
31 | import numpy |
|
27 | import numpy | |
32 | except ImportError: |
|
28 | except ImportError: | |
33 | pass |
|
29 | pass | |
34 |
|
30 | |||
35 | # Our own imports |
|
31 | # Our own imports | |
36 | from IPython.lib import display |
|
32 | from IPython.lib import display | |
37 |
|
33 | |||
38 | from IPython.testing.decorators import skipif_not_numpy |
|
34 | from IPython.testing.decorators import skipif_not_numpy | |
39 |
|
35 | |||
40 | #----------------------------------------------------------------------------- |
|
36 | #----------------------------------------------------------------------------- | |
41 | # Classes and functions |
|
37 | # Classes and functions | |
42 | #----------------------------------------------------------------------------- |
|
38 | #----------------------------------------------------------------------------- | |
43 |
|
39 | |||
44 | #-------------------------- |
|
40 | #-------------------------- | |
45 | # FileLink tests |
|
41 | # FileLink tests | |
46 | #-------------------------- |
|
42 | #-------------------------- | |
47 |
|
43 | |||
48 | def test_instantiation_FileLink(): |
|
44 | def test_instantiation_FileLink(): | |
49 | """FileLink: Test class can be instantiated""" |
|
45 | """FileLink: Test class can be instantiated""" | |
50 | fl = display.FileLink('example.txt') |
|
46 | fl = display.FileLink('example.txt') | |
51 | # TODO: remove if when only Python >= 3.6 is supported |
|
47 | # TODO: remove if when only Python >= 3.6 is supported | |
52 | fl = display.FileLink(pathlib.PurePath('example.txt')) |
|
48 | fl = display.FileLink(pathlib.PurePath('example.txt')) | |
53 |
|
49 | |||
54 | def test_warning_on_non_existent_path_FileLink(): |
|
50 | def test_warning_on_non_existent_path_FileLink(): | |
55 | """FileLink: Calling _repr_html_ on non-existent files returns a warning |
|
51 | """FileLink: Calling _repr_html_ on non-existent files returns a warning | |
56 | """ |
|
52 | """ | |
57 | fl = display.FileLink('example.txt') |
|
53 | fl = display.FileLink('example.txt') | |
58 | nt.assert_true(fl._repr_html_().startswith('Path (<tt>example.txt</tt>)')) |
|
54 | nt.assert_true(fl._repr_html_().startswith('Path (<tt>example.txt</tt>)')) | |
59 |
|
55 | |||
60 | def test_existing_path_FileLink(): |
|
56 | def test_existing_path_FileLink(): | |
61 | """FileLink: Calling _repr_html_ functions as expected on existing filepath |
|
57 | """FileLink: Calling _repr_html_ functions as expected on existing filepath | |
62 | """ |
|
58 | """ | |
63 | tf = NamedTemporaryFile() |
|
59 | tf = NamedTemporaryFile() | |
64 | fl = display.FileLink(tf.name) |
|
60 | fl = display.FileLink(tf.name) | |
65 | actual = fl._repr_html_() |
|
61 | actual = fl._repr_html_() | |
66 | expected = "<a href='%s' target='_blank'>%s</a><br>" % (tf.name,tf.name) |
|
62 | expected = "<a href='%s' target='_blank'>%s</a><br>" % (tf.name,tf.name) | |
67 | nt.assert_equal(actual,expected) |
|
63 | nt.assert_equal(actual,expected) | |
68 |
|
64 | |||
69 | def test_existing_path_FileLink_repr(): |
|
65 | def test_existing_path_FileLink_repr(): | |
70 | """FileLink: Calling repr() functions as expected on existing filepath |
|
66 | """FileLink: Calling repr() functions as expected on existing filepath | |
71 | """ |
|
67 | """ | |
72 | tf = NamedTemporaryFile() |
|
68 | tf = NamedTemporaryFile() | |
73 | fl = display.FileLink(tf.name) |
|
69 | fl = display.FileLink(tf.name) | |
74 | actual = repr(fl) |
|
70 | actual = repr(fl) | |
75 | expected = tf.name |
|
71 | expected = tf.name | |
76 | nt.assert_equal(actual,expected) |
|
72 | nt.assert_equal(actual,expected) | |
77 |
|
73 | |||
78 | def test_error_on_directory_to_FileLink(): |
|
74 | def test_error_on_directory_to_FileLink(): | |
79 | """FileLink: Raises error when passed directory |
|
75 | """FileLink: Raises error when passed directory | |
80 | """ |
|
76 | """ | |
81 | td = mkdtemp() |
|
77 | td = mkdtemp() | |
82 | nt.assert_raises(ValueError,display.FileLink,td) |
|
78 | nt.assert_raises(ValueError,display.FileLink,td) | |
83 |
|
79 | |||
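For orientation, the behaviour these FileLink tests pin down, as it would look in a notebook session; the path is illustrative.

    from IPython.lib.display import FileLink

    link = FileLink('example.txt')
    # In a notebook the object renders through _repr_html_:
    #   - if the file exists: an <a href='example.txt' target='_blank'>...</a> link
    #   - otherwise: a warning starting with "Path (<tt>example.txt</tt>)"
    # Passing a directory raises ValueError; use FileLinks for directories.
    print(link._repr_html_())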
84 | #-------------------------- |
|
80 | #-------------------------- | |
85 | # FileLinks tests |
|
81 | # FileLinks tests | |
86 | #-------------------------- |
|
82 | #-------------------------- | |
87 |
|
83 | |||
88 | def test_instantiation_FileLinks(): |
|
84 | def test_instantiation_FileLinks(): | |
89 | """FileLinks: Test class can be instantiated |
|
85 | """FileLinks: Test class can be instantiated | |
90 | """ |
|
86 | """ | |
91 | fls = display.FileLinks('example') |
|
87 | fls = display.FileLinks('example') | |
92 |
|
88 | |||
93 | def test_warning_on_non_existent_path_FileLinks(): |
|
89 | def test_warning_on_non_existent_path_FileLinks(): | |
94 | """FileLinks: Calling _repr_html_ on non-existent files returns a warning |
|
90 | """FileLinks: Calling _repr_html_ on non-existent files returns a warning | |
95 | """ |
|
91 | """ | |
96 | fls = display.FileLinks('example') |
|
92 | fls = display.FileLinks('example') | |
97 | nt.assert_true(fls._repr_html_().startswith('Path (<tt>example</tt>)')) |
|
93 | nt.assert_true(fls._repr_html_().startswith('Path (<tt>example</tt>)')) | |
98 |
|
94 | |||
99 | def test_existing_path_FileLinks(): |
|
95 | def test_existing_path_FileLinks(): | |
100 | """FileLinks: Calling _repr_html_ functions as expected on existing dir |
|
96 | """FileLinks: Calling _repr_html_ functions as expected on existing dir | |
101 | """ |
|
97 | """ | |
102 | td = mkdtemp() |
|
98 | td = mkdtemp() | |
103 | tf1 = NamedTemporaryFile(dir=td) |
|
99 | tf1 = NamedTemporaryFile(dir=td) | |
104 | tf2 = NamedTemporaryFile(dir=td) |
|
100 | tf2 = NamedTemporaryFile(dir=td) | |
105 | fl = display.FileLinks(td) |
|
101 | fl = display.FileLinks(td) | |
106 | actual = fl._repr_html_() |
|
102 | actual = fl._repr_html_() | |
107 | actual = actual.split('\n') |
|
103 | actual = actual.split('\n') | |
108 | actual.sort() |
|
104 | actual.sort() | |
109 | # the links should always have forward slashes, even on windows, so replace |
|
105 | # the links should always have forward slashes, even on windows, so replace | |
110 | # backslashes with forward slashes here |
|
106 | # backslashes with forward slashes here | |
111 | expected = ["%s/<br>" % td, |
|
107 | expected = ["%s/<br>" % td, | |
112 | " <a href='%s' target='_blank'>%s</a><br>" %\ |
|
108 | " <a href='%s' target='_blank'>%s</a><br>" %\ | |
113 | (tf2.name.replace("\\","/"),split(tf2.name)[1]), |
|
109 | (tf2.name.replace("\\","/"),split(tf2.name)[1]), | |
114 | " <a href='%s' target='_blank'>%s</a><br>" %\ |
|
110 | " <a href='%s' target='_blank'>%s</a><br>" %\ | |
115 | (tf1.name.replace("\\","/"),split(tf1.name)[1])] |
|
111 | (tf1.name.replace("\\","/"),split(tf1.name)[1])] | |
116 | expected.sort() |
|
112 | expected.sort() | |
117 | # We compare the sorted list of links here as that's more reliable |
|
113 | # We compare the sorted list of links here as that's more reliable | |
118 | nt.assert_equal(actual,expected) |
|
114 | nt.assert_equal(actual,expected) | |
119 |
|
115 | |||
120 | def test_existing_path_FileLinks_alt_formatter(): |
|
116 | def test_existing_path_FileLinks_alt_formatter(): | |
121 | """FileLinks: Calling _repr_html_ functions as expected w/ an alt formatter |
|
117 | """FileLinks: Calling _repr_html_ functions as expected w/ an alt formatter | |
122 | """ |
|
118 | """ | |
123 | td = mkdtemp() |
|
119 | td = mkdtemp() | |
124 | tf1 = NamedTemporaryFile(dir=td) |
|
120 | tf1 = NamedTemporaryFile(dir=td) | |
125 | tf2 = NamedTemporaryFile(dir=td) |
|
121 | tf2 = NamedTemporaryFile(dir=td) | |
126 | def fake_formatter(dirname,fnames,included_suffixes): |
|
122 | def fake_formatter(dirname,fnames,included_suffixes): | |
127 | return ["hello","world"] |
|
123 | return ["hello","world"] | |
128 | fl = display.FileLinks(td,notebook_display_formatter=fake_formatter) |
|
124 | fl = display.FileLinks(td,notebook_display_formatter=fake_formatter) | |
129 | actual = fl._repr_html_() |
|
125 | actual = fl._repr_html_() | |
130 | actual = actual.split('\n') |
|
126 | actual = actual.split('\n') | |
131 | actual.sort() |
|
127 | actual.sort() | |
132 | expected = ["hello","world"] |
|
128 | expected = ["hello","world"] | |
133 | expected.sort() |
|
129 | expected.sort() | |
134 | # We compare the sorted list of links here as that's more reliable |
|
130 | # We compare the sorted list of links here as that's more reliable | |
135 | nt.assert_equal(actual,expected) |
|
131 | nt.assert_equal(actual,expected) | |
136 |
|
132 | |||
137 | def test_existing_path_FileLinks_repr(): |
|
133 | def test_existing_path_FileLinks_repr(): | |
138 | """FileLinks: Calling repr() functions as expected on existing directory """ |
|
134 | """FileLinks: Calling repr() functions as expected on existing directory """ | |
139 | td = mkdtemp() |
|
135 | td = mkdtemp() | |
140 | tf1 = NamedTemporaryFile(dir=td) |
|
136 | tf1 = NamedTemporaryFile(dir=td) | |
141 | tf2 = NamedTemporaryFile(dir=td) |
|
137 | tf2 = NamedTemporaryFile(dir=td) | |
142 | fl = display.FileLinks(td) |
|
138 | fl = display.FileLinks(td) | |
143 | actual = repr(fl) |
|
139 | actual = repr(fl) | |
144 | actual = actual.split('\n') |
|
140 | actual = actual.split('\n') | |
145 | actual.sort() |
|
141 | actual.sort() | |
146 | expected = ['%s/' % td, ' %s' % split(tf1.name)[1],' %s' % split(tf2.name)[1]] |
|
142 | expected = ['%s/' % td, ' %s' % split(tf1.name)[1],' %s' % split(tf2.name)[1]] | |
147 | expected.sort() |
|
143 | expected.sort() | |
148 | # We compare the sorted list of links here as that's more reliable |
|
144 | # We compare the sorted list of links here as that's more reliable | |
149 | nt.assert_equal(actual,expected) |
|
145 | nt.assert_equal(actual,expected) | |
150 |
|
146 | |||
151 | def test_existing_path_FileLinks_repr_alt_formatter(): |
|
147 | def test_existing_path_FileLinks_repr_alt_formatter(): | |
152 | """FileLinks: Calling repr() functions as expected w/ alt formatter |
|
148 | """FileLinks: Calling repr() functions as expected w/ alt formatter | |
153 | """ |
|
149 | """ | |
154 | td = mkdtemp() |
|
150 | td = mkdtemp() | |
155 | tf1 = NamedTemporaryFile(dir=td) |
|
151 | tf1 = NamedTemporaryFile(dir=td) | |
156 | tf2 = NamedTemporaryFile(dir=td) |
|
152 | tf2 = NamedTemporaryFile(dir=td) | |
157 | def fake_formatter(dirname,fnames,included_suffixes): |
|
153 | def fake_formatter(dirname,fnames,included_suffixes): | |
158 | return ["hello","world"] |
|
154 | return ["hello","world"] | |
159 | fl = display.FileLinks(td,terminal_display_formatter=fake_formatter) |
|
155 | fl = display.FileLinks(td,terminal_display_formatter=fake_formatter) | |
160 | actual = repr(fl) |
|
156 | actual = repr(fl) | |
161 | actual = actual.split('\n') |
|
157 | actual = actual.split('\n') | |
162 | actual.sort() |
|
158 | actual.sort() | |
163 | expected = ["hello","world"] |
|
159 | expected = ["hello","world"] | |
164 | expected.sort() |
|
160 | expected.sort() | |
165 | # We compare the sorted list of links here as that's more reliable |
|
161 | # We compare the sorted list of links here as that's more reliable | |
166 | nt.assert_equal(actual,expected) |
|
162 | nt.assert_equal(actual,expected) | |
167 |
|
163 | |||
168 | def test_error_on_file_to_FileLinks(): |
|
164 | def test_error_on_file_to_FileLinks(): | |
169 | """FileLinks: Raises error when passed file |
|
165 | """FileLinks: Raises error when passed file | |
170 | """ |
|
166 | """ | |
171 | td = mkdtemp() |
|
167 | td = mkdtemp() | |
172 | tf1 = NamedTemporaryFile(dir=td) |
|
168 | tf1 = NamedTemporaryFile(dir=td) | |
173 | nt.assert_raises(ValueError,display.FileLinks,tf1.name) |
|
169 | nt.assert_raises(ValueError,display.FileLinks,tf1.name) | |
174 |
|
170 | |||
175 | def test_recursive_FileLinks(): |
|
171 | def test_recursive_FileLinks(): | |
176 | """FileLinks: Does not recurse when recursive=False |
|
172 | """FileLinks: Does not recurse when recursive=False | |
177 | """ |
|
173 | """ | |
178 | td = mkdtemp() |
|
174 | td = mkdtemp() | |
179 | tf = NamedTemporaryFile(dir=td) |
|
175 | tf = NamedTemporaryFile(dir=td) | |
180 | subtd = mkdtemp(dir=td) |
|
176 | subtd = mkdtemp(dir=td) | |
181 | subtf = NamedTemporaryFile(dir=subtd) |
|
177 | subtf = NamedTemporaryFile(dir=subtd) | |
182 | fl = display.FileLinks(td) |
|
178 | fl = display.FileLinks(td) | |
183 | actual = str(fl) |
|
179 | actual = str(fl) | |
184 | actual = actual.split('\n') |
|
180 | actual = actual.split('\n') | |
185 | nt.assert_equal(len(actual), 4, actual) |
|
181 | nt.assert_equal(len(actual), 4, actual) | |
186 | fl = display.FileLinks(td, recursive=False) |
|
182 | fl = display.FileLinks(td, recursive=False) | |
187 | actual = str(fl) |
|
183 | actual = str(fl) | |
188 | actual = actual.split('\n') |
|
184 | actual = actual.split('\n') | |
189 | nt.assert_equal(len(actual), 2, actual) |
|
185 | nt.assert_equal(len(actual), 2, actual) | |
190 |
|
186 | |||
191 | def test_audio_from_file(): |
|
187 | def test_audio_from_file(): | |
192 | path = pjoin(dirname(__file__), 'test.wav') |
|
188 | path = pjoin(dirname(__file__), 'test.wav') | |
193 | display.Audio(filename=path) |
|
189 | display.Audio(filename=path) | |
194 |
|
190 | |||
195 | class TestAudioDataWithNumpy(TestCase): |
|
191 | class TestAudioDataWithNumpy(TestCase): | |
196 |
|
192 | |||
197 | @skipif_not_numpy |
|
193 | @skipif_not_numpy | |
198 | def test_audio_from_numpy_array(self): |
|
194 | def test_audio_from_numpy_array(self): | |
199 | test_tone = get_test_tone() |
|
195 | test_tone = get_test_tone() | |
200 | audio = display.Audio(test_tone, rate=44100) |
|
196 | audio = display.Audio(test_tone, rate=44100) | |
201 | nt.assert_equal(len(read_wav(audio.data)), len(test_tone)) |
|
197 | nt.assert_equal(len(read_wav(audio.data)), len(test_tone)) | |
202 |
|
198 | |||
203 | @skipif_not_numpy |
|
199 | @skipif_not_numpy | |
204 | def test_audio_from_list(self): |
|
200 | def test_audio_from_list(self): | |
205 | test_tone = get_test_tone() |
|
201 | test_tone = get_test_tone() | |
206 | audio = display.Audio(list(test_tone), rate=44100) |
|
202 | audio = display.Audio(list(test_tone), rate=44100) | |
207 | nt.assert_equal(len(read_wav(audio.data)), len(test_tone)) |
|
203 | nt.assert_equal(len(read_wav(audio.data)), len(test_tone)) | |
208 |
|
204 | |||
209 | @skipif_not_numpy |
|
205 | @skipif_not_numpy | |
210 | def test_audio_from_numpy_array_without_rate_raises(self): |
|
206 | def test_audio_from_numpy_array_without_rate_raises(self): | |
211 | nt.assert_raises(ValueError, display.Audio, get_test_tone()) |
|
207 | nt.assert_raises(ValueError, display.Audio, get_test_tone()) | |
212 |
|
208 | |||
213 | @skipif_not_numpy |
|
209 | @skipif_not_numpy | |
214 | def test_audio_data_normalization(self): |
|
210 | def test_audio_data_normalization(self): | |
215 | expected_max_value = numpy.iinfo(numpy.int16).max |
|
211 | expected_max_value = numpy.iinfo(numpy.int16).max | |
216 | for scale in [1, 0.5, 2]: |
|
212 | for scale in [1, 0.5, 2]: | |
217 | audio = display.Audio(get_test_tone(scale), rate=44100) |
|
213 | audio = display.Audio(get_test_tone(scale), rate=44100) | |
218 | actual_max_value = numpy.max(numpy.abs(read_wav(audio.data))) |
|
214 | actual_max_value = numpy.max(numpy.abs(read_wav(audio.data))) | |
219 | nt.assert_equal(actual_max_value, expected_max_value) |
|
215 | nt.assert_equal(actual_max_value, expected_max_value) | |
220 |
|
216 | |||
221 | @skipif_not_numpy |
|
217 | @skipif_not_numpy | |
222 | def test_audio_data_without_normalization(self): |
|
218 | def test_audio_data_without_normalization(self): | |
223 | max_int16 = numpy.iinfo(numpy.int16).max |
|
219 | max_int16 = numpy.iinfo(numpy.int16).max | |
224 | for scale in [1, 0.5, 0.2]: |
|
220 | for scale in [1, 0.5, 0.2]: | |
225 | test_tone = get_test_tone(scale) |
|
221 | test_tone = get_test_tone(scale) | |
226 | test_tone_max_abs = numpy.max(numpy.abs(test_tone)) |
|
222 | test_tone_max_abs = numpy.max(numpy.abs(test_tone)) | |
227 | expected_max_value = int(max_int16 * test_tone_max_abs) |
|
223 | expected_max_value = int(max_int16 * test_tone_max_abs) | |
228 | audio = display.Audio(test_tone, rate=44100, normalize=False) |
|
224 | audio = display.Audio(test_tone, rate=44100, normalize=False) | |
229 | actual_max_value = numpy.max(numpy.abs(read_wav(audio.data))) |
|
225 | actual_max_value = numpy.max(numpy.abs(read_wav(audio.data))) | |
230 | nt.assert_equal(actual_max_value, expected_max_value) |
|
226 | nt.assert_equal(actual_max_value, expected_max_value) | |
231 |
|
227 | |||
232 | def test_audio_data_without_normalization_raises_for_invalid_data(self): |
|
228 | def test_audio_data_without_normalization_raises_for_invalid_data(self): | |
233 | nt.assert_raises( |
|
229 | nt.assert_raises( | |
234 | ValueError, |
|
230 | ValueError, | |
235 | lambda: display.Audio([1.001], rate=44100, normalize=False)) |
|
231 | lambda: display.Audio([1.001], rate=44100, normalize=False)) | |
236 | nt.assert_raises( |
|
232 | nt.assert_raises( | |
237 | ValueError, |
|
233 | ValueError, | |
238 | lambda: display.Audio([-1.001], rate=44100, normalize=False)) |
|
234 | lambda: display.Audio([-1.001], rate=44100, normalize=False)) | |
239 |
|
235 | |||
240 | def simulate_numpy_not_installed(): |
|
236 | def simulate_numpy_not_installed(): | |
241 | try: |
|
237 | try: | |
242 | import numpy |
|
238 | import numpy | |
243 | return mock.patch('numpy.array', mock.MagicMock(side_effect=ImportError)) |
|
239 | return mock.patch('numpy.array', mock.MagicMock(side_effect=ImportError)) | |
244 | except ModuleNotFoundError: |
|
240 | except ModuleNotFoundError: | |
245 | return lambda x:x |
|
241 | return lambda x:x | |
246 |
|
242 | |||
247 | @simulate_numpy_not_installed() |
|
243 | @simulate_numpy_not_installed() | |
248 | class TestAudioDataWithoutNumpy(TestAudioDataWithNumpy): |
|
244 | class TestAudioDataWithoutNumpy(TestAudioDataWithNumpy): | |
249 | # All tests from `TestAudioDataWithNumpy` are inherited. |
|
245 | # All tests from `TestAudioDataWithNumpy` are inherited. | |
250 |
|
246 | |||
251 | @skipif_not_numpy |
|
247 | @skipif_not_numpy | |
252 | def test_audio_raises_for_nested_list(self): |
|
248 | def test_audio_raises_for_nested_list(self): | |
253 | stereo_signal = [list(get_test_tone())] * 2 |
|
249 | stereo_signal = [list(get_test_tone())] * 2 | |
254 | nt.assert_raises( |
|
250 | nt.assert_raises( | |
255 | TypeError, |
|
251 | TypeError, | |
256 | lambda: display.Audio(stereo_signal, rate=44100)) |
|
252 | lambda: display.Audio(stereo_signal, rate=44100)) | |
257 |
|
253 | |||
258 | @skipif_not_numpy |
|
254 | @skipif_not_numpy | |
259 | def get_test_tone(scale=1): |
|
255 | def get_test_tone(scale=1): | |
260 | return numpy.sin(2 * numpy.pi * 440 * numpy.linspace(0, 1, 44100)) * scale |
|
256 | return numpy.sin(2 * numpy.pi * 440 * numpy.linspace(0, 1, 44100)) * scale | |
261 |
|
257 | |||
262 | def read_wav(data): |
|
258 | def read_wav(data): | |
263 | with wave.open(BytesIO(data)) as wave_file: |
|
259 | with wave.open(BytesIO(data)) as wave_file: | |
264 | wave_data = wave_file.readframes(wave_file.getnframes()) |
|
260 | wave_data = wave_file.readframes(wave_file.getnframes()) | |
265 | num_samples = wave_file.getnframes() * wave_file.getnchannels() |
|
261 | num_samples = wave_file.getnframes() * wave_file.getnchannels() | |
266 | return struct.unpack('<%sh' % num_samples, wave_data) |
|
262 | return struct.unpack('<%sh' % num_samples, wave_data) | |
267 |
|
263 | |||
268 | def test_code_from_file(): |
|
264 | def test_code_from_file(): | |
269 | c = display.Code(filename=__file__) |
|
265 | c = display.Code(filename=__file__) | |
270 | assert c._repr_html_().startswith('<style>') |
|
266 | assert c._repr_html_().startswith('<style>') |
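
The normalization tests above pin down the contract of display.Audio: with the default normalize=True the waveform is rescaled so its peak reaches the int16 maximum, while normalize=False keeps the original amplitude and raises ValueError for samples outside [-1, 1]. A minimal usage sketch of that behaviour (assuming numpy is installed; the 440 Hz tone mirrors get_test_tone above):

    import numpy as np
    from IPython.display import Audio

    rate = 44100
    # Half-amplitude 440 Hz sine tone, one second long.
    tone = 0.5 * np.sin(2 * np.pi * 440 * np.linspace(0, 1, rate))

    # Default: the data is scaled up so its peak hits the int16 maximum.
    loud = Audio(tone, rate=rate)

    # normalize=False preserves the 0.5 peak; values outside [-1, 1] would
    # raise ValueError, as exercised by the tests above.
    quiet = Audio(tone, rate=rate, normalize=False)
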
@@ -1,71 +1,71 b'' | |||||
1 | # coding: utf-8 |
|
1 | # coding: utf-8 | |
2 | """ |
|
2 | """ | |
3 | Utilities for dealing with text encodings |
|
3 | Utilities for dealing with text encodings | |
4 | """ |
|
4 | """ | |
5 |
|
5 | |||
6 | #----------------------------------------------------------------------------- |
|
6 | #----------------------------------------------------------------------------- | |
7 | # Copyright (C) 2008-2012 The IPython Development Team |
|
7 | # Copyright (C) 2008-2012 The IPython Development Team | |
8 | # |
|
8 | # | |
9 | # Distributed under the terms of the BSD License. The full license is in |
|
9 | # Distributed under the terms of the BSD License. The full license is in | |
10 | # the file COPYING, distributed as part of this software. |
|
10 | # the file COPYING, distributed as part of this software. | |
11 | #----------------------------------------------------------------------------- |
|
11 | #----------------------------------------------------------------------------- | |
12 |
|
12 | |||
13 | #----------------------------------------------------------------------------- |
|
13 | #----------------------------------------------------------------------------- | |
14 | # Imports |
|
14 | # Imports | |
15 | #----------------------------------------------------------------------------- |
|
15 | #----------------------------------------------------------------------------- | |
16 | import sys |
|
16 | import sys | |
17 | import locale |
|
17 | import locale | |
18 | import warnings |
|
18 | import warnings | |
19 |
|
19 | |||
20 | # to deal with the possibility of sys.std* not being a stream at all |
|
20 | # to deal with the possibility of sys.std* not being a stream at all | |
21 | def get_stream_enc(stream, default=None): |
|
21 | def get_stream_enc(stream, default=None): | |
22 | """Return the given stream's encoding or a default. |
|
22 | """Return the given stream's encoding or a default. | |
23 |
|
23 | |||
24 | There are cases where ``sys.std*`` might not actually be a stream, so |
|
24 | There are cases where ``sys.std*`` might not actually be a stream, so | |
25 | check for the encoding attribute prior to returning it, and return |
|
25 | check for the encoding attribute prior to returning it, and return | |
26 | a default if it doesn't exist or evaluates as False. ``default`` |
|
26 | a default if it doesn't exist or evaluates as False. ``default`` | |
27 | is None if not provided. |
|
27 | is None if not provided. | |
28 | """ |
|
28 | """ | |
29 | if not hasattr(stream, 'encoding') or not stream.encoding: |
|
29 | if not hasattr(stream, 'encoding') or not stream.encoding: | |
30 | return default |
|
30 | return default | |
31 | else: |
|
31 | else: | |
32 | return stream.encoding |
|
32 | return stream.encoding | |
33 |
|
33 | |||
34 | # Less conservative replacement for sys.getdefaultencoding, that will try |
|
34 | # Less conservative replacement for sys.getdefaultencoding, that will try | |
35 | # to match the environment. |
|
35 | # to match the environment. | |
36 | # Defined here as central function, so if we find better choices, we |
|
36 | # Defined here as central function, so if we find better choices, we | |
37 | # won't need to make changes all over IPython. |
|
37 | # won't need to make changes all over IPython. | |
38 | def getdefaultencoding(prefer_stream=True): |
|
38 | def getdefaultencoding(prefer_stream=True): | |
39 | """Return IPython's guess for the default encoding for bytes as text. |
|
39 | """Return IPython's guess for the default encoding for bytes as text. | |
40 |
|
40 | |||
41 | If prefer_stream is True (default), asks for stdin.encoding first, |
|
41 | If prefer_stream is True (default), asks for stdin.encoding first, | |
42 | to match the calling Terminal, but that is often None for subprocesses. |
|
42 | to match the calling Terminal, but that is often None for subprocesses. | |
43 |
|
43 | |||
44 | Then fall back on locale.getpreferredencoding(), |
|
44 | Then fall back on locale.getpreferredencoding(), | |
45 | which should be a sensible platform default (that respects LANG environment), |
|
45 | which should be a sensible platform default (that respects LANG environment), | |
46 | and finally to sys.getdefaultencoding() which is the most conservative option, |
|
46 | and finally to sys.getdefaultencoding() which is the most conservative option, | |
47 | and usually |
|
47 | and usually UTF8 as of Python 3. | |
48 | """ |
|
48 | """ | |
49 | enc = None |
|
49 | enc = None | |
50 | if prefer_stream: |
|
50 | if prefer_stream: | |
51 | enc = get_stream_enc(sys.stdin) |
|
51 | enc = get_stream_enc(sys.stdin) | |
52 | if not enc or enc=='ascii': |
|
52 | if not enc or enc=='ascii': | |
53 | try: |
|
53 | try: | |
54 | # There are reports of getpreferredencoding raising errors |
|
54 | # There are reports of getpreferredencoding raising errors | |
55 | # in some cases, which may well be fixed, but let's be conservative here. |
|
55 | # in some cases, which may well be fixed, but let's be conservative here. | |
56 | enc = locale.getpreferredencoding() |
|
56 | enc = locale.getpreferredencoding() | |
57 | except Exception: |
|
57 | except Exception: | |
58 | pass |
|
58 | pass | |
59 | enc = enc or sys.getdefaultencoding() |
|
59 | enc = enc or sys.getdefaultencoding() | |
60 | # On windows `cp0` can be returned to indicate that there is no code page. |
|
60 | # On windows `cp0` can be returned to indicate that there is no code page. | |
61 | # Since cp0 is an invalid encoding return instead cp1252 which is the |
|
61 | # Since cp0 is an invalid encoding return instead cp1252 which is the | |
62 | # Western European default. |
|
62 | # Western European default. | |
63 | if enc == 'cp0': |
|
63 | if enc == 'cp0': | |
64 | warnings.warn( |
|
64 | warnings.warn( | |
65 | "Invalid code page cp0 detected - using cp1252 instead." |
|
65 | "Invalid code page cp0 detected - using cp1252 instead." | |
66 | "If cp1252 is incorrect please ensure a valid code page " |
|
66 | "If cp1252 is incorrect please ensure a valid code page " | |
67 | "is defined for the process.", RuntimeWarning) |
|
67 | "is defined for the process.", RuntimeWarning) | |
68 | return 'cp1252' |
|
68 | return 'cp1252' | |
69 | return enc |
|
69 | return enc | |
70 |
|
70 | |||
71 | DEFAULT_ENCODING = getdefaultencoding() |
|
71 | DEFAULT_ENCODING = getdefaultencoding() |
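
As a usage note, a short sketch of how this fallback chain is typically consumed: raw bytes from a child process carry no declared encoding, so callers decode them with IPython's guess rather than hard-coding 'utf-8'. The subprocess command here is only illustrative.

    import subprocess
    import sys
    from IPython.utils.encoding import DEFAULT_ENCODING, get_stream_enc

    # Child-process output is undecoded bytes; use the module's best guess,
    # which prefers the terminal's stdin encoding when one is set.
    raw = subprocess.check_output([sys.executable, '-c', "print('hello')"])
    text = raw.decode(DEFAULT_ENCODING, errors='replace')

    # get_stream_enc guards against sys.stdout having been replaced by an
    # object with no usable encoding attribute.
    out_enc = get_stream_enc(sys.stdout, default=DEFAULT_ENCODING)
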
@@ -1,105 +1,103 b'' | |||||
1 | """ |
|
1 | """ | |
2 | Tools to open .py files as Unicode, using the encoding specified within the file, |
|
2 | Tools to open .py files as Unicode, using the encoding specified within the file, | |
3 | as per PEP 263. |
|
3 | as per PEP 263. | |
4 |
|
4 | |||
5 | Much of the code is taken from the tokenize module in Python 3.2. |
|
5 | Much of the code is taken from the tokenize module in Python 3.2. | |
6 | """ |
|
6 | """ | |
7 |
|
7 | |||
8 | import io |
|
8 | import io | |
9 | from io import TextIOWrapper, BytesIO |
|
9 | from io import TextIOWrapper, BytesIO | |
10 | import re |
|
10 | import re | |
11 | from tokenize import open, detect_encoding |
|
11 | from tokenize import open, detect_encoding | |
12 |
|
12 | |||
13 | cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)", re.UNICODE) |
|
13 | cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)", re.UNICODE) | |
14 | cookie_comment_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE) |
|
14 | cookie_comment_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE) | |
15 |
|
15 | |||
16 | def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True): |
|
16 | def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True): | |
17 | """Converts a bytes string with python source code to unicode. |
|
17 | """Converts a bytes string with python source code to unicode. | |
18 |
|
18 | |||
19 | Unicode strings are passed through unchanged. Byte strings are checked |
|
19 | Unicode strings are passed through unchanged. Byte strings are checked | |
20 | for the python source file encoding cookie to determine encoding. |
|
20 | for the python source file encoding cookie to determine encoding. | |
21 | txt can be either a bytes buffer or a string containing the source |
|
21 | txt can be either a bytes buffer or a string containing the source | |
22 | code. |
|
22 | code. | |
23 | """ |
|
23 | """ | |
24 | if isinstance(txt, str): |
|
24 | if isinstance(txt, str): | |
25 | return txt |
|
25 | return txt | |
26 | if isinstance(txt, bytes): |
|
26 | if isinstance(txt, bytes): | |
27 | buffer = BytesIO(txt) |
|
27 | buffer = BytesIO(txt) | |
28 | else: |
|
28 | else: | |
29 | buffer = txt |
|
29 | buffer = txt | |
30 | try: |
|
30 | try: | |
31 | encoding, _ = detect_encoding(buffer.readline) |
|
31 | encoding, _ = detect_encoding(buffer.readline) | |
32 | except SyntaxError: |
|
32 | except SyntaxError: | |
33 | encoding = "ascii" |
|
33 | encoding = "ascii" | |
34 | buffer.seek(0) |
|
34 | buffer.seek(0) | |
35 | with TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True) as text: |
|
35 | with TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True) as text: | |
36 | text.mode = 'r' |
|
36 | text.mode = 'r' | |
37 | if skip_encoding_cookie: |
|
37 | if skip_encoding_cookie: | |
38 | return u"".join(strip_encoding_cookie(text)) |
|
38 | return u"".join(strip_encoding_cookie(text)) | |
39 | else: |
|
39 | else: | |
40 | return text.read() |
|
40 | return text.read() | |
41 |
|
41 | |||
42 | def strip_encoding_cookie(filelike): |
|
42 | def strip_encoding_cookie(filelike): | |
43 | """Generator to pull lines from a text-mode file, skipping the encoding |
|
43 | """Generator to pull lines from a text-mode file, skipping the encoding | |
44 | cookie if it is found in the first two lines. |
|
44 | cookie if it is found in the first two lines. | |
45 | """ |
|
45 | """ | |
46 | it = iter(filelike) |
|
46 | it = iter(filelike) | |
47 | try: |
|
47 | try: | |
48 | first = next(it) |
|
48 | first = next(it) | |
49 | if not cookie_comment_re.match(first): |
|
49 | if not cookie_comment_re.match(first): | |
50 | yield first |
|
50 | yield first | |
51 | second = next(it) |
|
51 | second = next(it) | |
52 | if not cookie_comment_re.match(second): |
|
52 | if not cookie_comment_re.match(second): | |
53 | yield second |
|
53 | yield second | |
54 | except StopIteration: |
|
54 | except StopIteration: | |
55 | return |
|
55 | return | |
56 |
|
56 | |||
57 | for line in it: |
|
57 | for line in it: | |
58 | yield line |
|
58 | yield line | |
59 |
|
59 | |||
60 | def read_py_file(filename, skip_encoding_cookie=True): |
|
60 | def read_py_file(filename, skip_encoding_cookie=True): | |
61 | """Read a Python file, using the encoding declared inside the file. |
|
61 | """Read a Python file, using the encoding declared inside the file. | |
62 |
|
62 | |||
63 | Parameters |
|
63 | Parameters | |
64 | ---------- |
|
64 | ---------- | |
65 | filename : str |
|
65 | filename : str | |
66 | The path to the file to read. |
|
66 | The path to the file to read. | |
67 | skip_encoding_cookie : bool |
|
67 | skip_encoding_cookie : bool | |
68 | If True (the default), and the encoding declaration is found in the first |
|
68 | If True (the default), and the encoding declaration is found in the first | |
69 | two lines, that line will be excluded from the output |
|
69 | two lines, that line will be excluded from the output. | |
70 | unicode string with an encoding declaration is a SyntaxError in Python 2. |
|
|||
71 |
|
70 | |||
72 | Returns |
|
71 | Returns | |
73 | ------- |
|
72 | ------- | |
74 | A unicode string containing the contents of the file. |
|
73 | A unicode string containing the contents of the file. | |
75 | """ |
|
74 | """ | |
76 | with open(filename) as f: # the open function defined in this module. |
|
75 | with open(filename) as f: # the open function defined in this module. | |
77 | if skip_encoding_cookie: |
|
76 | if skip_encoding_cookie: | |
78 | return "".join(strip_encoding_cookie(f)) |
|
77 | return "".join(strip_encoding_cookie(f)) | |
79 | else: |
|
78 | else: | |
80 | return f.read() |
|
79 | return f.read() | |
81 |
|
80 | |||
82 | def read_py_url(url, errors='replace', skip_encoding_cookie=True): |
|
81 | def read_py_url(url, errors='replace', skip_encoding_cookie=True): | |
83 | """Read a Python file from a URL, using the encoding declared inside the file. |
|
82 | """Read a Python file from a URL, using the encoding declared inside the file. | |
84 |
|
83 | |||
85 | Parameters |
|
84 | Parameters | |
86 | ---------- |
|
85 | ---------- | |
87 | url : str |
|
86 | url : str | |
88 | The URL from which to fetch the file. |
|
87 | The URL from which to fetch the file. | |
89 | errors : str |
|
88 | errors : str | |
90 | How to handle decoding errors in the file. Options are the same as for |
|
89 | How to handle decoding errors in the file. Options are the same as for | |
91 | bytes.decode(), but here 'replace' is the default. |
|
90 | bytes.decode(), but here 'replace' is the default. | |
92 | skip_encoding_cookie : bool |
|
91 | skip_encoding_cookie : bool | |
93 | If True (the default), and the encoding declaration is found in the first |
|
92 | If True (the default), and the encoding declaration is found in the first | |
94 | two lines, that line will be excluded from the output |
|
93 | two lines, that line will be excluded from the output. | |
95 | unicode string with an encoding declaration is a SyntaxError in Python 2. |
|
|||
|||
96 |
|
94 | |||
97 | Returns |
|
95 | Returns | |
98 | ------- |
|
96 | ------- | |
99 | A unicode string containing the contents of the file. |
|
97 | A unicode string containing the contents of the file. | |
100 | """ |
|
98 | """ | |
101 | # Deferred import for faster start |
|
99 | # Deferred import for faster start | |
102 | from urllib.request import urlopen |
|
100 | from urllib.request import urlopen | |
103 | response = urlopen(url) |
|
101 | response = urlopen(url) | |
104 | buffer = io.BytesIO(response.read()) |
|
102 | buffer = io.BytesIO(response.read()) | |
105 | return source_to_unicode(buffer, errors, skip_encoding_cookie) |
|
103 | return source_to_unicode(buffer, errors, skip_encoding_cookie) |
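
For reference, a sketch of the round trip these helpers perform: a source file that declares a non-UTF-8 encoding is decoded with that encoding, and with skip_encoding_cookie=True the cookie line itself is dropped from the result. The temporary file is only illustrative.

    import os
    import tempfile
    from IPython.utils.openpy import read_py_file, source_to_unicode

    # latin-1 encoded source with a PEP 263 cookie on the first line.
    src = b"# -*- coding: latin-1 -*-\nname = 'caf\xe9'\n"

    with tempfile.NamedTemporaryFile(suffix='.py', delete=False) as f:
        f.write(src)
        path = f.name

    try:
        # The cookie drives decoding, then is stripped from the output.
        text = read_py_file(path, skip_encoding_cookie=True)
        assert 'coding' not in text and 'caf\xe9' in text
        # source_to_unicode accepts the raw bytes directly as well.
        assert 'caf\xe9' in source_to_unicode(src)
    finally:
        os.unlink(path)
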
@@ -1,39 +1,38 b'' | |||||
1 | #!/usr/bin/env python |
|
1 | #!/usr/bin/env python | |
2 | """Extract a session from the IPython input history. |
|
2 | """Extract a session from the IPython input history. | |
3 |
|
3 | |||
4 | Usage: |
|
4 | Usage: | |
5 | ipython-get-history.py sessionnumber [outputfile] |
|
5 | ipython-get-history.py sessionnumber [outputfile] | |
6 |
|
6 | |||
7 | If outputfile is not given, the relevant history is written to stdout. If |
|
7 | If outputfile is not given, the relevant history is written to stdout. If | |
8 | outputfile has a .py extension, the translated history (without IPython's |
|
8 | outputfile has a .py extension, the translated history (without IPython's | |
9 | special syntax) will be extracted. |
|
9 | special syntax) will be extracted. | |
10 |
|
10 | |||
11 | Example: |
|
11 | Example: | |
12 | ./ipython-get-history.py 57 record.ipy |
|
12 | ./ipython-get-history.py 57 record.ipy | |
13 |
|
13 | |||
14 |
|
14 | |||
15 | This script is a simple demonstration of HistoryAccessor. It should be possible |
|
15 | This script is a simple demonstration of HistoryAccessor. It should be possible | |
16 | to build much more flexible and powerful tools to browse and pull from the |
|
16 | to build much more flexible and powerful tools to browse and pull from the | |
17 | history database. |
|
17 | history database. | |
18 | """ |
|
18 | """ | |
19 | import sys |
|
19 | import sys | |
20 |
|
20 | |||
21 | from IPython.core.history import HistoryAccessor |
|
21 | from IPython.core.history import HistoryAccessor | |
22 |
|
22 | |||
23 | session_number = int(sys.argv[1]) |
|
23 | session_number = int(sys.argv[1]) | |
24 | if len(sys.argv) > 2: |
|
24 | if len(sys.argv) > 2: | |
25 | dest = open(sys.argv[2], "w") |
|
25 | dest = open(sys.argv[2], "w") | |
26 | raw = not sys.argv[2].endswith('.py') |
|
26 | raw = not sys.argv[2].endswith('.py') | |
27 | else: |
|
27 | else: | |
28 | dest = sys.stdout |
|
28 | dest = sys.stdout | |
29 | raw = True |
|
29 | raw = True | |
30 |
|
30 | |||
31 | with dest: |
|
31 | with dest: | |
32 | dest.write("# coding: utf-8\n") |
|
32 | dest.write("# coding: utf-8\n") | |
33 |
|
33 | |||
34 | # Profiles other than 'default' can be specified here with a profile= argument: |
|
34 | # Profiles other than 'default' can be specified here with a profile= argument: | |
35 | hist = HistoryAccessor() |
|
35 | hist = HistoryAccessor() | |
36 |
|
36 | |||
37 | for session, lineno, cell in hist.get_range(session=session_number, raw=raw): |
|
37 | for session, lineno, cell in hist.get_range(session=session_number, raw=raw): | |
38 | cell = cell.encode('utf-8') # This line is only needed on Python 2. |
|
|||
39 | dest.write(cell + '\n') |
|
38 | dest.write(cell + '\n') |
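
The script is a thin wrapper around HistoryAccessor.get_range, and the same query can be run directly from Python; a sketch, with the session number 57 taken from the usage example above and otherwise arbitrary:

    from IPython.core.history import HistoryAccessor

    hist = HistoryAccessor()  # opens the default profile's history database

    # raw=True yields input exactly as typed (IPython syntax included);
    # raw=False yields the transformed, plain-Python source instead.
    for session, lineno, cell in hist.get_range(session=57, raw=True):
        print('%d: %s' % (lineno, cell))
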