@@ -1,1032 +1,1031 @@
# -*- coding: utf-8 -*-
"""Tools for inspecting Python objects.

Uses syntax highlighting for presenting the various information elements.

Similar in spirit to the inspect module, but all calls take a name argument to
reference the name under which an object is being read.
"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

__all__ = ['Inspector','InspectColors']

# stdlib modules
import ast
import inspect
from inspect import signature
import linecache
import warnings
import os
from textwrap import dedent
import types
import io as stdlib_io

from typing import Union

# IPython's own
from IPython.core import page
from IPython.lib.pretty import pretty
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import PyColorize
from IPython.utils import openpy
from IPython.utils import py3compat
from IPython.utils.dir2 import safe_hasattr
from IPython.utils.path import compress_user
from IPython.utils.text import indent
from IPython.utils.wildcard import list_namespace
from IPython.utils.wildcard import typestr2type
from IPython.utils.coloransi import TermColors, ColorScheme, ColorSchemeTable
from IPython.utils.py3compat import cast_unicode
from IPython.utils.colorable import Colorable
from IPython.utils.decorators import undoc

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

def pylight(code):
    return highlight(code, PythonLexer(), HtmlFormatter(noclasses=True))

# builtin docstrings to ignore
_func_call_docstring = types.FunctionType.__call__.__doc__
_object_init_docstring = object.__init__.__doc__
_builtin_type_docstrings = {
    inspect.getdoc(t) for t in (types.ModuleType, types.MethodType,
                                types.FunctionType, property)
}

_builtin_func_type = type(all)
_builtin_meth_type = type(str.upper)  # Bound methods have the same type as builtin functions
#****************************************************************************
# Builtin color schemes

Colors = TermColors  # just a shorthand

InspectColors = PyColorize.ANSICodeColors

#****************************************************************************
# Auxiliary functions and objects

# See the messaging spec for the definition of all these fields.  This list
# effectively defines the order of display
info_fields = ['type_name', 'base_class', 'string_form', 'namespace',
               'length', 'file', 'definition', 'docstring', 'source',
               'init_definition', 'class_docstring', 'init_docstring',
               'call_def', 'call_docstring',
               # These won't be printed but will be used to determine how to
               # format the object
               'ismagic', 'isalias', 'isclass', 'found', 'name'
               ]


def object_info(**kw):
    """Make an object info dict with all fields present."""
    infodict = {k:None for k in info_fields}
    infodict.update(kw)
    return infodict
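
# Usage sketch (editor's illustration, not part of the upstream file or of
# this changeset): object_info() guarantees that every key listed in
# `info_fields` is present, defaulting to None unless overridden:
#
#     >>> info = object_info(name='x', found=False)
#     >>> sorted(info) == sorted(info_fields)
#     True
#     >>> info['docstring'] is None
#     True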


def get_encoding(obj):
    """Get encoding for python source file defining obj

    Returns None if obj is not defined in a sourcefile.
    """
    ofile = find_file(obj)
    # run contents of file through pager starting at line where the object
    # is defined, as long as the file isn't binary and is actually on the
    # filesystem.
    if ofile is None:
        return None
    elif ofile.endswith(('.so', '.dll', '.pyd')):
        return None
    elif not os.path.isfile(ofile):
        return None
    else:
        # Print only text files, not extension binaries.  Note that
        # getsourcelines returns lineno with 1-offset and page() uses
        # 0-offset, so we must adjust.
        with stdlib_io.open(ofile, 'rb') as buffer:   # Tweaked to use io.open for Python 2
            encoding, lines = openpy.detect_encoding(buffer.readline)
        return encoding
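
# Usage sketch (editor's illustration, not part of the upstream file): the
# helper only answers for objects backed by a real Python source file on
# disk; C builtins yield None. For functions defined in this module it
# reports the encoding cookie declared on line 1:
#
#     >>> get_encoding(object_info)
#     'utf-8'
#     >>> get_encoding(len) is None
#     True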

def getdoc(obj) -> Union[str,None]:
    """Stable wrapper around inspect.getdoc.

    This can't crash because of attribute problems.

    It also attempts to call a getdoc() method on the given object.  This
    allows objects which provide their docstrings via non-standard mechanisms
    (like Pyro proxies) to still be inspected by ipython's ? system.
    """
    # Allow objects to offer customized documentation via a getdoc method:
    try:
        ds = obj.getdoc()
    except Exception:
        pass
    else:
        if isinstance(ds, str):
            return inspect.cleandoc(ds)
    docstr = inspect.getdoc(obj)
    return docstr
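
# Usage sketch (editor's illustration, not part of the upstream file): a
# custom getdoc() method wins over __doc__, and its result is passed through
# inspect.cleandoc():
#
#     >>> class Proxy:
#     ...     """static docstring"""
#     ...     def getdoc(self):
#     ...         return "   dynamic docstring"
#     >>> getdoc(Proxy())
#     'dynamic docstring'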


def getsource(obj, oname='') -> Union[str,None]:
    """Wrapper around inspect.getsource.

    This can be modified by other projects to provide customized source
    extraction.

    Parameters
    ----------
    obj : object
        an object whose source code we will attempt to extract
    oname : str
        (optional) a name under which the object is known

    Returns
    -------
    src : unicode or None

    """

    if isinstance(obj, property):
        sources = []
        for attrname in ['fget', 'fset', 'fdel']:
            fn = getattr(obj, attrname)
            if fn is not None:
                encoding = get_encoding(fn)
                oname_prefix = ('%s.' % oname) if oname else ''
                sources.append(''.join(('# ', oname_prefix, attrname)))
                if inspect.isfunction(fn):
                    sources.append(dedent(getsource(fn)))
                else:
                    # Default str/repr only prints function name,
                    # pretty.pretty prints module name too.
                    sources.append(
                        '%s%s = %s\n' % (oname_prefix, attrname, pretty(fn))
                    )
        if sources:
            return '\n'.join(sources)
        else:
            return None

    else:
        # Get source for non-property objects.

        obj = _get_wrapped(obj)

        try:
            src = inspect.getsource(obj)
        except TypeError:
            # The object itself provided no meaningful source, try looking for
            # its class definition instead.
            if hasattr(obj, '__class__'):
                try:
                    src = inspect.getsource(obj.__class__)
                except TypeError:
                    return None

        return src
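
# Usage sketch (editor's illustration, not part of the upstream file): for a
# property, getsource() emits the source of each defined accessor under a
# '# <oname>.fget' style comment header and joins the pieces with newlines.
# `SomeClass.attr` below is a hypothetical property defined in a real source
# file (inspect.getsource cannot see interactively defined code), so the
# doctest is skipped:
#
#     >>> print(getsource(SomeClass.attr, oname='attr'))  # doctest: +SKIP
#     # attr.fget
#     @property
#     def attr(self):
#         ...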


def is_simple_callable(obj):
    """True if obj is a function ()"""
    return (inspect.isfunction(obj) or inspect.ismethod(obj) or \
            isinstance(obj, _builtin_func_type) or isinstance(obj, _builtin_meth_type))
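
# Usage sketch (editor's illustration, not part of the upstream file): plain
# functions, methods and C builtins count as "simple"; instances that are
# only callable through __call__ do not:
#
#     >>> is_simple_callable(len), is_simple_callable(str.upper)
#     (True, True)
#     >>> class WithCall:
#     ...     def __call__(self): pass
#     >>> is_simple_callable(WithCall())
#     False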

@undoc
def getargspec(obj):
    """Wrapper around :func:`inspect.getfullargspec`
-    :func:inspect.getargspec` on Python 2.

    In addition to functions and methods, this can also handle objects with a
    ``__call__`` attribute.

    DEPRECATED: Deprecated since 7.10. Do not use, will be removed.
    """

    warnings.warn('`getargspec` function is deprecated as of IPython 7.10'
                  'and will be removed in future versions.', DeprecationWarning, stacklevel=2)

    if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
        obj = obj.__call__

    return inspect.getfullargspec(obj)

@undoc
def format_argspec(argspec):
    """Format argspect, convenience wrapper around inspect's.

    This takes a dict instead of ordered arguments and calls
    inspect.format_argspec with the arguments in the necessary order.

    DEPRECATED: Do not use; will be removed in future versions.
    """

    warnings.warn('`format_argspec` function is deprecated as of IPython 7.10'
                  'and will be removed in future versions.', DeprecationWarning, stacklevel=2)


    return inspect.formatargspec(argspec['args'], argspec['varargs'],
                                 argspec['varkw'], argspec['defaults'])

@undoc
def call_tip(oinfo, format_call=True):
    """DEPRECATED. Extract call tip data from an oinfo dict.
    """
    warnings.warn('`call_tip` function is deprecated as of IPython 6.0'
                  'and will be removed in future versions.', DeprecationWarning, stacklevel=2)
    # Get call definition
    argspec = oinfo.get('argspec')
    if argspec is None:
        call_line = None
    else:
        # Callable objects will have 'self' as their first argument, prune
        # it out if it's there for clarity (since users do *not* pass an
        # extra first argument explicitly).
        try:
            has_self = argspec['args'][0] == 'self'
        except (KeyError, IndexError):
            pass
        else:
            if has_self:
                argspec['args'] = argspec['args'][1:]

        call_line = oinfo['name']+format_argspec(argspec)

    # Now get docstring.
    # The priority is: call docstring, constructor docstring, main one.
    doc = oinfo.get('call_docstring')
    if doc is None:
        doc = oinfo.get('init_docstring')
    if doc is None:
        doc = oinfo.get('docstring','')

    return call_line, doc


def _get_wrapped(obj):
    """Get the original object if wrapped in one or more @decorators

    Some objects automatically construct similar objects on any unrecognised
    attribute access (e.g. unittest.mock.call). To protect against infinite loops,
    this will arbitrarily cut off after 100 levels of obj.__wrapped__
    attribute access. --TK, Jan 2016
    """
    orig_obj = obj
    i = 0
    while safe_hasattr(obj, '__wrapped__'):
        obj = obj.__wrapped__
        i += 1
        if i > 100:
            # __wrapped__ is probably a lie, so return the thing we started with
            return orig_obj
    return obj
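
# Usage sketch (editor's illustration, not part of the upstream file):
# functools.wraps records the original callable on __wrapped__, which is
# exactly the chain _get_wrapped() follows:
#
#     >>> import functools
#     >>> def deco(f):
#     ...     @functools.wraps(f)
#     ...     def wrapper(*a, **kw):
#     ...         return f(*a, **kw)
#     ...     return wrapper
#     >>> @deco
#     ... def plain():
#     ...     pass
#     >>> _get_wrapped(plain) is plain.__wrapped__
#     True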

def find_file(obj) -> str:
    """Find the absolute path to the file where an object was defined.

    This is essentially a robust wrapper around `inspect.getabsfile`.

    Returns None if no file can be found.

    Parameters
    ----------
    obj : any Python object

    Returns
    -------
    fname : str
        The absolute path to the file where the object was defined.
    """
    obj = _get_wrapped(obj)

    fname = None
    try:
        fname = inspect.getabsfile(obj)
    except TypeError:
        # For an instance, the file that matters is where its class was
        # declared.
        if hasattr(obj, '__class__'):
            try:
                fname = inspect.getabsfile(obj.__class__)
            except TypeError:
                # Can happen for builtins
                pass
    except:
        pass
    return cast_unicode(fname)


def find_source_lines(obj):
    """Find the line number in a file where an object was defined.

    This is essentially a robust wrapper around `inspect.getsourcelines`.

    Returns None if no file can be found.

    Parameters
    ----------
    obj : any Python object

    Returns
    -------
    lineno : int
        The line number where the object definition starts.
    """
    obj = _get_wrapped(obj)

    try:
        try:
            lineno = inspect.getsourcelines(obj)[1]
        except TypeError:
            # For instances, try the class object like getsource() does
            if hasattr(obj, '__class__'):
                lineno = inspect.getsourcelines(obj.__class__)[1]
            else:
                lineno = None
    except:
        return None

    return lineno
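
# Usage sketch (editor's illustration, not part of the upstream file): both
# helpers degrade to None instead of raising for objects without Python
# source, such as C-level builtins:
#
#     >>> find_file(len) is None and find_source_lines(len) is None
#     True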

class Inspector(Colorable):

    def __init__(self, color_table=InspectColors,
                 code_color_table=PyColorize.ANSICodeColors,
                 scheme=None,
                 str_detail_level=0,
                 parent=None, config=None):
        super(Inspector, self).__init__(parent=parent, config=config)
        self.color_table = color_table
        self.parser = PyColorize.Parser(out='str', parent=self, style=scheme)
        self.format = self.parser.format
        self.str_detail_level = str_detail_level
        self.set_active_scheme(scheme)

    def _getdef(self,obj,oname='') -> Union[str,None]:
        """Return the call signature for any callable object.

        If any exception is generated, None is returned instead and the
        exception is suppressed."""
        try:
            return _render_signature(signature(obj), oname)
        except:
            return None

    def __head(self,h) -> str:
        """Return a header string with proper colors."""
        return '%s%s%s' % (self.color_table.active_colors.header,h,
                           self.color_table.active_colors.normal)

    def set_active_scheme(self, scheme):
        if scheme is not None:
            self.color_table.set_active_scheme(scheme)
            self.parser.color_table.set_active_scheme(scheme)

    def noinfo(self, msg, oname):
        """Generic message when no information is found."""
        print('No %s found' % msg, end=' ')
        if oname:
            print('for %s' % oname)
        else:
            print()

    def pdef(self, obj, oname=''):
        """Print the call signature for any callable object.

        If the object is a class, print the constructor information."""

        if not callable(obj):
            print('Object is not callable.')
            return

        header = ''

        if inspect.isclass(obj):
            header = self.__head('Class constructor information:\n')


        output = self._getdef(obj,oname)
        if output is None:
            self.noinfo('definition header',oname)
        else:
            print(header,self.format(output), end=' ')

    # In Python 3, all classes are new-style, so they all have __init__.
    @skip_doctest
    def pdoc(self, obj, oname='', formatter=None):
        """Print the docstring for any object.

        Optional:
        -formatter: a function to run the docstring through for specially
        formatted docstrings.

        Examples
        --------

        In [1]: class NoInit:
           ...:     pass

        In [2]: class NoDoc:
           ...:     def __init__(self):
           ...:         pass

        In [3]: %pdoc NoDoc
        No documentation found for NoDoc

        In [4]: %pdoc NoInit
        No documentation found for NoInit

        In [5]: obj = NoInit()

        In [6]: %pdoc obj
        No documentation found for obj

        In [5]: obj2 = NoDoc()

        In [6]: %pdoc obj2
        No documentation found for obj2
        """

        head = self.__head  # For convenience
        lines = []
        ds = getdoc(obj)
        if formatter:
            ds = formatter(ds).get('plain/text', ds)
        if ds:
            lines.append(head("Class docstring:"))
            lines.append(indent(ds))
        if inspect.isclass(obj) and hasattr(obj, '__init__'):
            init_ds = getdoc(obj.__init__)
            if init_ds is not None:
                lines.append(head("Init docstring:"))
                lines.append(indent(init_ds))
        elif hasattr(obj,'__call__'):
            call_ds = getdoc(obj.__call__)
            if call_ds:
                lines.append(head("Call docstring:"))
                lines.append(indent(call_ds))

        if not lines:
            self.noinfo('documentation',oname)
        else:
            page.page('\n'.join(lines))

    def psource(self, obj, oname=''):
        """Print the source code for an object."""

        # Flush the source cache because inspect can return out-of-date source
        linecache.checkcache()
        try:
            src = getsource(obj, oname=oname)
        except Exception:
            src = None

        if src is None:
            self.noinfo('source', oname)
        else:
            page.page(self.format(src))

    def pfile(self, obj, oname=''):
        """Show the whole file where an object was defined."""

        lineno = find_source_lines(obj)
        if lineno is None:
            self.noinfo('file', oname)
            return

        ofile = find_file(obj)
        # run contents of file through pager starting at line where the object
        # is defined, as long as the file isn't binary and is actually on the
        # filesystem.
        if ofile.endswith(('.so', '.dll', '.pyd')):
            print('File %r is binary, not printing.' % ofile)
        elif not os.path.isfile(ofile):
            print('File %r does not exist, not printing.' % ofile)
        else:
            # Print only text files, not extension binaries.  Note that
            # getsourcelines returns lineno with 1-offset and page() uses
            # 0-offset, so we must adjust.
            page.page(self.format(openpy.read_py_file(ofile, skip_encoding_cookie=False)), lineno - 1)


    def _mime_format(self, text:str, formatter=None) -> dict:
        """Return a mime bundle representation of the input text.

        - if `formatter` is None, the returned mime bundle has
            a `text/plain` field, with the input text.
            a `text/html` field with a `<pre>` tag containing the input text.

        - if `formatter` is not None, it must be a callable transforming the
            input text into a mime bundle. Default values for `text/plain` and
            `text/html` representations are the ones described above.

        Note:

        Formatters returning strings are supported but this behavior is deprecated.

        """
        defaults = {
            'text/plain': text,
            'text/html': '<pre>' + text + '</pre>'
        }

        if formatter is None:
            return defaults
        else:
            formatted = formatter(text)

            if not isinstance(formatted, dict):
                # Handle the deprecated behavior of a formatter returning
                # a string instead of a mime bundle.
                return {
                    'text/plain': formatted,
                    'text/html': '<pre>' + formatted + '</pre>'
                }

            else:
                return dict(defaults, **formatted)


    def format_mime(self, bundle):

        text_plain = bundle['text/plain']

        text = ''
        heads, bodies = list(zip(*text_plain))
        _len = max(len(h) for h in heads)

        for head, body in zip(heads, bodies):
            body = body.strip('\n')
            delim = '\n' if '\n' in body else ' '
            text += self.__head(head+':') + (_len - len(head))*' ' +delim + body +'\n'

        bundle['text/plain'] = text
        return bundle
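
    # Usage sketch (editor's illustration, not part of the upstream file):
    # format_mime() expects bundle['text/plain'] to be a list of
    # (title, body) pairs and collapses it into a single aligned, colorized
    # text block (exact output depends on the active color scheme, so the
    # doctest is skipped):
    #
    #     >>> insp = Inspector()
    #     >>> bundle = {'text/plain': [('Type', 'int'), ('Docstring', 'an int')],
    #     ...           'text/html': ''}
    #     >>> print(insp.format_mime(bundle)['text/plain'])  # doctest: +SKIP
    #     Type:      int
    #     Docstring: an int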

    def _get_info(self, obj, oname='', formatter=None, info=None, detail_level=0):
        """Retrieve an info dict and format it.

        Parameters
        ==========

        obj: any
            Object to inspect and return info from
        oname: str (default: ''):
            Name of the variable pointing to `obj`.
        formatter: callable
        info:
            already computed information
        detail_level: integer
            Granularity of detail level, if set to 1, give more information.
        """

        info = self._info(obj, oname=oname, info=info, detail_level=detail_level)

        _mime = {
            'text/plain': [],
            'text/html': '',
        }

        def append_field(bundle, title:str, key:str, formatter=None):
            field = info[key]
            if field is not None:
                formatted_field = self._mime_format(field, formatter)
                bundle['text/plain'].append((title, formatted_field['text/plain']))
                bundle['text/html'] += '<h1>' + title + '</h1>\n' + formatted_field['text/html'] + '\n'

        def code_formatter(text):
            return {
                'text/plain': self.format(text),
                'text/html': pylight(text)
            }

        if info['isalias']:
            append_field(_mime, 'Repr', 'string_form')

        elif info['ismagic']:
            if detail_level > 0:
                append_field(_mime, 'Source', 'source', code_formatter)
            else:
                append_field(_mime, 'Docstring', 'docstring', formatter)
            append_field(_mime, 'File', 'file')

        elif info['isclass'] or is_simple_callable(obj):
            # Functions, methods, classes
            append_field(_mime, 'Signature', 'definition', code_formatter)
            append_field(_mime, 'Init signature', 'init_definition', code_formatter)
            append_field(_mime, 'Docstring', 'docstring', formatter)
            if detail_level > 0 and info['source']:
                append_field(_mime, 'Source', 'source', code_formatter)
            else:
                append_field(_mime, 'Init docstring', 'init_docstring', formatter)

            append_field(_mime, 'File', 'file')
            append_field(_mime, 'Type', 'type_name')
            append_field(_mime, 'Subclasses', 'subclasses')

        else:
            # General Python objects
            append_field(_mime, 'Signature', 'definition', code_formatter)
            append_field(_mime, 'Call signature', 'call_def', code_formatter)
            append_field(_mime, 'Type', 'type_name')
            append_field(_mime, 'String form', 'string_form')

            # Namespace
            if info['namespace'] != 'Interactive':
                append_field(_mime, 'Namespace', 'namespace')

            append_field(_mime, 'Length', 'length')
            append_field(_mime, 'File', 'file')

            # Source or docstring, depending on detail level and whether
            # source found.
            if detail_level > 0 and info['source']:
                append_field(_mime, 'Source', 'source', code_formatter)
            else:
                append_field(_mime, 'Docstring', 'docstring', formatter)

            append_field(_mime, 'Class docstring', 'class_docstring', formatter)
            append_field(_mime, 'Init docstring', 'init_docstring', formatter)
            append_field(_mime, 'Call docstring', 'call_docstring', formatter)


        return self.format_mime(_mime)

    def pinfo(self, obj, oname='', formatter=None, info=None, detail_level=0, enable_html_pager=True):
        """Show detailed information about an object.

        Optional arguments:

        - oname: name of the variable pointing to the object.

        - formatter: callable (optional)
              A special formatter for docstrings.

              The formatter is a callable that takes a string as an input
              and returns either a formatted string or a mime type bundle
              in the form of a dictionary.

              Although the support of custom formatter returning a string
              instead of a mime type bundle is deprecated.

        - info: a structure with some information fields which may have been
          precomputed already.

        - detail_level: if set to 1, more information is given.
        """
        info = self._get_info(obj, oname, formatter, info, detail_level)
        if not enable_html_pager:
            del info['text/html']
        page.page(info)

    def info(self, obj, oname='', formatter=None, info=None, detail_level=0):
        """DEPRECATED. Compute a dict with detailed information about an object.
        """
        if formatter is not None:
            warnings.warn('The `formatter` keyword argument to `Inspector.info`'
                          'is deprecated as of IPython 5.0 and will have no effects.',
                          DeprecationWarning, stacklevel=2)
        return self._info(obj, oname=oname, info=info, detail_level=detail_level)

    def _info(self, obj, oname='', info=None, detail_level=0) -> dict:
        """Compute a dict with detailed information about an object.

        Parameters
        ==========

        obj: any
            An object to find information about
        oname: str (default: ''):
            Name of the variable pointing to `obj`.
        info: (default: None)
            A struct (dict like with attr access) with some information fields
            which may have been precomputed already.
        detail_level: int (default:0)
            If set to 1, more information is given.

        Returns
        =======

        An object info dict with known fields from `info_fields`. Keys are
        strings, values are string or None.
        """

        if info is None:
            ismagic = False
            isalias = False
            ospace = ''
        else:
            ismagic = info.ismagic
            isalias = info.isalias
            ospace = info.namespace

        # Get docstring, special-casing aliases:
        if isalias:
            if not callable(obj):
                try:
                    ds = "Alias to the system command:\n %s" % obj[1]
                except:
                    ds = "Alias: " + str(obj)
            else:
                ds = "Alias to " + str(obj)
                if obj.__doc__:
                    ds += "\nDocstring:\n" + obj.__doc__
        else:
            ds = getdoc(obj)
            if ds is None:
                ds = '<no docstring>'

        # store output in a dict, we initialize it here and fill it as we go
        out = dict(name=oname, found=True, isalias=isalias, ismagic=ismagic, subclasses=None)

        string_max = 200 # max size of strings to show (snipped if longer)
        shalf = int((string_max - 5) / 2)

        if ismagic:
            out['type_name'] = 'Magic function'
        elif isalias:
            out['type_name'] = 'System alias'
        else:
            out['type_name'] = type(obj).__name__

        try:
            bclass = obj.__class__
            out['base_class'] = str(bclass)
        except:
            pass

        # String form, but snip if too long in ? form (full in ??)
        if detail_level >= self.str_detail_level:
            try:
                ostr = str(obj)
                str_head = 'string_form'
                if not detail_level and len(ostr)>string_max:
                    ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:]
                ostr = ("\n" + " " * len(str_head.expandtabs())).\
                        join(q.strip() for q in ostr.split("\n"))
                out[str_head] = ostr
            except:
                pass

        if ospace:
            out['namespace'] = ospace

        # Length (for strings and lists)
        try:
            out['length'] = str(len(obj))
        except Exception:
            pass

        # Filename where object was defined
        binary_file = False
        fname = find_file(obj)
        if fname is None:
            # if anything goes wrong, we don't want to show source, so it's as
            # if the file was binary
            binary_file = True
        else:
            if fname.endswith(('.so', '.dll', '.pyd')):
                binary_file = True
            elif fname.endswith('<string>'):
                fname = 'Dynamically generated function. No source code available.'
            out['file'] = compress_user(fname)

        # Original source code for a callable, class or property.
        if detail_level:
            # Flush the source cache because inspect can return out-of-date
            # source
            linecache.checkcache()
            try:
                if isinstance(obj, property) or not binary_file:
                    src = getsource(obj, oname)
                    if src is not None:
                        src = src.rstrip()
                    out['source'] = src

            except Exception:
                pass

        # Add docstring only if no source is to be shown (avoid repetitions).
        if ds and not self._source_contains_docstring(out.get('source'), ds):
            out['docstring'] = ds

        # Constructor docstring for classes
        if inspect.isclass(obj):
            out['isclass'] = True

            # get the init signature:
            try:
                init_def = self._getdef(obj, oname)
            except AttributeError:
                init_def = None

            # get the __init__ docstring
            try:
                obj_init = obj.__init__
            except AttributeError:
                init_ds = None
            else:
                if init_def is None:
                    # Get signature from init if top-level sig failed.
                    # Can happen for built-in types (list, etc.).
                    try:
                        init_def = self._getdef(obj_init, oname)
                    except AttributeError:
                        pass
                init_ds = getdoc(obj_init)
                # Skip Python's auto-generated docstrings
                if init_ds == _object_init_docstring:
|
841 | if init_ds == _object_init_docstring: | |
843 | init_ds = None |
|
842 | init_ds = None | |
844 |
|
843 | |||
845 | if init_def: |
|
844 | if init_def: | |
846 | out['init_definition'] = init_def |
|
845 | out['init_definition'] = init_def | |
847 |
|
846 | |||
848 | if init_ds: |
|
847 | if init_ds: | |
849 | out['init_docstring'] = init_ds |
|
848 | out['init_docstring'] = init_ds | |
850 |
|
849 | |||
851 | names = [sub.__name__ for sub in type.__subclasses__(obj)] |
|
850 | names = [sub.__name__ for sub in type.__subclasses__(obj)] | |
852 | if len(names) < 10: |
|
851 | if len(names) < 10: | |
853 | all_names = ', '.join(names) |
|
852 | all_names = ', '.join(names) | |
854 | else: |
|
853 | else: | |
855 | all_names = ', '.join(names[:10]+['...']) |
|
854 | all_names = ', '.join(names[:10]+['...']) | |
856 | out['subclasses'] = all_names |
|
855 | out['subclasses'] = all_names | |
857 | # and class docstring for instances: |
|
856 | # and class docstring for instances: | |
858 | else: |
|
857 | else: | |
859 | # reconstruct the function definition and print it: |
|
858 | # reconstruct the function definition and print it: | |
860 | defln = self._getdef(obj, oname) |
|
859 | defln = self._getdef(obj, oname) | |
861 | if defln: |
|
860 | if defln: | |
862 | out['definition'] = defln |
|
861 | out['definition'] = defln | |
863 |
|
862 | |||
864 | # First, check whether the instance docstring is identical to the |
|
863 | # First, check whether the instance docstring is identical to the | |
865 | # class one, and print it separately if they don't coincide. In |
|
864 | # class one, and print it separately if they don't coincide. In | |
866 | # most cases they will, but it's nice to print all the info for |
|
865 | # most cases they will, but it's nice to print all the info for | |
867 | # objects which use instance-customized docstrings. |
|
866 | # objects which use instance-customized docstrings. | |
868 | if ds: |
|
867 | if ds: | |
869 | try: |
|
868 | try: | |
870 | cls = getattr(obj,'__class__') |
|
869 | cls = getattr(obj,'__class__') | |
871 | except: |
|
870 | except: | |
872 | class_ds = None |
|
871 | class_ds = None | |
873 | else: |
|
872 | else: | |
874 | class_ds = getdoc(cls) |
|
873 | class_ds = getdoc(cls) | |
875 | # Skip Python's auto-generated docstrings |
|
874 | # Skip Python's auto-generated docstrings | |
876 | if class_ds in _builtin_type_docstrings: |
|
875 | if class_ds in _builtin_type_docstrings: | |
877 | class_ds = None |
|
876 | class_ds = None | |
878 | if class_ds and ds != class_ds: |
|
877 | if class_ds and ds != class_ds: | |
879 | out['class_docstring'] = class_ds |
|
878 | out['class_docstring'] = class_ds | |
880 |
|
879 | |||
881 | # Next, try to show constructor docstrings |
|
880 | # Next, try to show constructor docstrings | |
882 | try: |
|
881 | try: | |
883 | init_ds = getdoc(obj.__init__) |
|
882 | init_ds = getdoc(obj.__init__) | |
884 | # Skip Python's auto-generated docstrings |
|
883 | # Skip Python's auto-generated docstrings | |
885 | if init_ds == _object_init_docstring: |
|
884 | if init_ds == _object_init_docstring: | |
886 | init_ds = None |
|
885 | init_ds = None | |
887 | except AttributeError: |
|
886 | except AttributeError: | |
888 | init_ds = None |
|
887 | init_ds = None | |
889 | if init_ds: |
|
888 | if init_ds: | |
890 | out['init_docstring'] = init_ds |
|
889 | out['init_docstring'] = init_ds | |
891 |
|
890 | |||
892 | # Call form docstring for callable instances |
|
891 | # Call form docstring for callable instances | |
893 | if safe_hasattr(obj, '__call__') and not is_simple_callable(obj): |
|
892 | if safe_hasattr(obj, '__call__') and not is_simple_callable(obj): | |
894 | call_def = self._getdef(obj.__call__, oname) |
|
893 | call_def = self._getdef(obj.__call__, oname) | |
895 | if call_def and (call_def != out.get('definition')): |
|
894 | if call_def and (call_def != out.get('definition')): | |
896 | # it may never be the case that call def and definition differ, |
|
895 | # it may never be the case that call def and definition differ, | |
897 | # but don't include the same signature twice |
|
896 | # but don't include the same signature twice | |
898 | out['call_def'] = call_def |
|
897 | out['call_def'] = call_def | |
899 | call_ds = getdoc(obj.__call__) |
|
898 | call_ds = getdoc(obj.__call__) | |
900 | # Skip Python's auto-generated docstrings |
|
899 | # Skip Python's auto-generated docstrings | |
901 | if call_ds == _func_call_docstring: |
|
900 | if call_ds == _func_call_docstring: | |
902 | call_ds = None |
|
901 | call_ds = None | |
903 | if call_ds: |
|
902 | if call_ds: | |
904 | out['call_docstring'] = call_ds |
|
903 | out['call_docstring'] = call_ds | |
905 |
|
904 | |||
906 | return object_info(**out) |
|
905 | return object_info(**out) | |
907 |
|
906 | |||
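The string-form handling above snips over-long reprs for the plain ``?`` view, while ``??`` keeps them whole. A minimal standalone sketch of that snipping rule, using the same 200-character budget (the helper name is made up for illustration)::

    def snip_string_form(ostr, string_max=200):
        # Mirror of the logic above: keep the head and tail, drop the middle.
        shalf = int((string_max - 5) / 2)   # 97 characters on each side
        if len(ostr) > string_max:
            ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:]
        return ostr

    print(len(snip_string_form('x' * 500)))   # 201: 97 + len(' <...> ') + 97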
908 | @staticmethod |
|
907 | @staticmethod | |
909 | def _source_contains_docstring(src, doc): |
|
908 | def _source_contains_docstring(src, doc): | |
910 | """ |
|
909 | """ | |
911 | Check whether the source *src* contains the docstring *doc*. |
|
910 | Check whether the source *src* contains the docstring *doc*. | |
912 |
|
911 | |||
913 | This is a helper function to skip displaying the docstring if the |
|
912 | This is a helper function to skip displaying the docstring if the | |
914 | source already contains it, avoiding repetition of information. |
|
913 | source already contains it, avoiding repetition of information. | |
915 | """ |
|
914 | """ | |
916 | try: |
|
915 | try: | |
917 | def_node, = ast.parse(dedent(src)).body |
|
916 | def_node, = ast.parse(dedent(src)).body | |
918 | return ast.get_docstring(def_node) == doc |
|
917 | return ast.get_docstring(def_node) == doc | |
919 | except Exception: |
|
918 | except Exception: | |
920 | # The source can become invalid or even non-existent (because it |
|
919 | # The source can become invalid or even non-existent (because it | |
921 | # is re-fetched from the source file) so the above code can fail in |
|
920 | # is re-fetched from the source file) so the above code can fail in | |
922 | # arbitrary ways. |
|
921 | # arbitrary ways. | |
923 | return False |
|
922 | return False | |
924 |
|
923 | |||
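A quick stdlib-only illustration of the check performed by ``_source_contains_docstring`` (the sample function is invented for the example)::

    import ast
    from textwrap import dedent

    src = '''
    def greet():
        """Say hello."""
        return "hello"
    '''
    def_node, = ast.parse(dedent(src)).body
    print(ast.get_docstring(def_node) == "Say hello.")   # True -> skip the docstring field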
925 | def psearch(self,pattern,ns_table,ns_search=[], |
|
924 | def psearch(self,pattern,ns_table,ns_search=[], | |
926 | ignore_case=False,show_all=False, *, list_types=False): |
|
925 | ignore_case=False,show_all=False, *, list_types=False): | |
927 | """Search namespaces with wildcards for objects. |
|
926 | """Search namespaces with wildcards for objects. | |
928 |
|
927 | |||
929 | Arguments: |
|
928 | Arguments: | |
930 |
|
929 | |||
931 | - pattern: string containing shell-like wildcards to use in namespace |
|
930 | - pattern: string containing shell-like wildcards to use in namespace | |
932 | searches and optionally a type specification to narrow the search to |
|
931 | searches and optionally a type specification to narrow the search to | |
933 | objects of that type. |
|
932 | objects of that type. | |
934 |
|
933 | |||
935 | - ns_table: dict of name->namespaces for search. |
|
934 | - ns_table: dict of name->namespaces for search. | |
936 |
|
935 | |||
937 | Optional arguments: |
|
936 | Optional arguments: | |
938 |
|
937 | |||
939 | - ns_search: list of namespace names to include in search. |
|
938 | - ns_search: list of namespace names to include in search. | |
940 |
|
939 | |||
941 | - ignore_case(False): make the search case-insensitive. |
|
940 | - ignore_case(False): make the search case-insensitive. | |
942 |
|
941 | |||
943 | - show_all(False): show all names, including those starting with |
|
942 | - show_all(False): show all names, including those starting with | |
944 | underscores. |
|
943 | underscores. | |
945 |
|
944 | |||
946 | - list_types(False): list all available object types for object matching. |
|
945 | - list_types(False): list all available object types for object matching. | |
947 | """ |
|
946 | """ | |
948 | #print 'ps pattern:<%r>' % pattern # dbg |
|
947 | #print 'ps pattern:<%r>' % pattern # dbg | |
949 |
|
948 | |||
950 | # defaults |
|
949 | # defaults | |
951 | type_pattern = 'all' |
|
950 | type_pattern = 'all' | |
952 | filter = '' |
|
951 | filter = '' | |
953 |
|
952 | |||
954 | # list all object types |
|
953 | # list all object types | |
955 | if list_types: |
|
954 | if list_types: | |
956 | page.page('\n'.join(sorted(typestr2type))) |
|
955 | page.page('\n'.join(sorted(typestr2type))) | |
957 | return |
|
956 | return | |
958 |
|
957 | |||
959 | cmds = pattern.split() |
|
958 | cmds = pattern.split() | |
960 | len_cmds = len(cmds) |
|
959 | len_cmds = len(cmds) | |
961 | if len_cmds == 1: |
|
960 | if len_cmds == 1: | |
962 | # Only filter pattern given |
|
961 | # Only filter pattern given | |
963 | filter = cmds[0] |
|
962 | filter = cmds[0] | |
964 | elif len_cmds == 2: |
|
963 | elif len_cmds == 2: | |
965 | # Both filter and type specified |
|
964 | # Both filter and type specified | |
966 | filter,type_pattern = cmds |
|
965 | filter,type_pattern = cmds | |
967 | else: |
|
966 | else: | |
968 | raise ValueError('invalid argument string for psearch: <%s>' % |
|
967 | raise ValueError('invalid argument string for psearch: <%s>' % | |
969 | pattern) |
|
968 | pattern) | |
970 |
|
969 | |||
971 | # filter search namespaces |
|
970 | # filter search namespaces | |
972 | for name in ns_search: |
|
971 | for name in ns_search: | |
973 | if name not in ns_table: |
|
972 | if name not in ns_table: | |
974 | raise ValueError('invalid namespace <%s>. Valid names: %s' % |
|
973 | raise ValueError('invalid namespace <%s>. Valid names: %s' % | |
975 | (name,ns_table.keys())) |
|
974 | (name,ns_table.keys())) | |
976 |
|
975 | |||
977 | #print 'type_pattern:',type_pattern # dbg |
|
976 | #print 'type_pattern:',type_pattern # dbg | |
978 | search_result, namespaces_seen = set(), set() |
|
977 | search_result, namespaces_seen = set(), set() | |
979 | for ns_name in ns_search: |
|
978 | for ns_name in ns_search: | |
980 | ns = ns_table[ns_name] |
|
979 | ns = ns_table[ns_name] | |
981 | # Normally, locals and globals are the same, so we just check one. |
|
980 | # Normally, locals and globals are the same, so we just check one. | |
982 | if id(ns) in namespaces_seen: |
|
981 | if id(ns) in namespaces_seen: | |
983 | continue |
|
982 | continue | |
984 | namespaces_seen.add(id(ns)) |
|
983 | namespaces_seen.add(id(ns)) | |
985 | tmp_res = list_namespace(ns, type_pattern, filter, |
|
984 | tmp_res = list_namespace(ns, type_pattern, filter, | |
986 | ignore_case=ignore_case, show_all=show_all) |
|
985 | ignore_case=ignore_case, show_all=show_all) | |
987 | search_result.update(tmp_res) |
|
986 | search_result.update(tmp_res) | |
988 |
|
987 | |||
989 | page.page('\n'.join(sorted(search_result))) |
|
988 | page.page('\n'.join(sorted(search_result))) | |
990 |
|
989 | |||
991 |
|
990 | |||
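The method above is what backs wildcard queries such as ``%psearch a* int`` in a session; it splits the pattern into a name filter and an optional type, then delegates to ``list_namespace``. A hedged sketch of that underlying call (the toy namespace is made up)::

    from IPython.utils.wildcard import list_namespace

    ns = {'alpha': 1, 'almost': 'hi', 'beta': [1, 2]}
    # Equivalent of the pattern 'al* int': names matching 'al*' whose type is int.
    matches = list_namespace(ns, 'int', 'al*', ignore_case=False, show_all=True)
    print(sorted(matches))   # only 'alpha' survives both the name and type filters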
992 | def _render_signature(obj_signature, obj_name) -> str: |
|
991 | def _render_signature(obj_signature, obj_name) -> str: | |
993 | """ |
|
992 | """ | |
994 | This was mostly taken from inspect.Signature.__str__. |
|
993 | This was mostly taken from inspect.Signature.__str__. | |
995 | Look there for the comments. |
|
994 | Look there for the comments. | |
996 | The only change is to add linebreaks when this gets too long. |
|
995 | The only change is to add linebreaks when this gets too long. | |
997 | """ |
|
996 | """ | |
998 | result = [] |
|
997 | result = [] | |
999 | pos_only = False |
|
998 | pos_only = False | |
1000 | kw_only = True |
|
999 | kw_only = True | |
1001 | for param in obj_signature.parameters.values(): |
|
1000 | for param in obj_signature.parameters.values(): | |
1002 | if param.kind == inspect._POSITIONAL_ONLY: |
|
1001 | if param.kind == inspect._POSITIONAL_ONLY: | |
1003 | pos_only = True |
|
1002 | pos_only = True | |
1004 | elif pos_only: |
|
1003 | elif pos_only: | |
1005 | result.append('/') |
|
1004 | result.append('/') | |
1006 | pos_only = False |
|
1005 | pos_only = False | |
1007 |
|
1006 | |||
1008 | if param.kind == inspect._VAR_POSITIONAL: |
|
1007 | if param.kind == inspect._VAR_POSITIONAL: | |
1009 | kw_only = False |
|
1008 | kw_only = False | |
1010 | elif param.kind == inspect._KEYWORD_ONLY and kw_only: |
|
1009 | elif param.kind == inspect._KEYWORD_ONLY and kw_only: | |
1011 | result.append('*') |
|
1010 | result.append('*') | |
1012 | kw_only = False |
|
1011 | kw_only = False | |
1013 |
|
1012 | |||
1014 | result.append(str(param)) |
|
1013 | result.append(str(param)) | |
1015 |
|
1014 | |||
1016 | if pos_only: |
|
1015 | if pos_only: | |
1017 | result.append('/') |
|
1016 | result.append('/') | |
1018 |
|
1017 | |||
1019 | # add up name, parameters, braces (2), and commas |
|
1018 | # add up name, parameters, braces (2), and commas | |
1020 | if len(obj_name) + sum(len(r) + 2 for r in result) > 75: |
|
1019 | if len(obj_name) + sum(len(r) + 2 for r in result) > 75: | |
1021 | # This doesn't fit behind "Signature: " in an inspect window. |
|
1020 | # This doesn't fit behind "Signature: " in an inspect window. | |
1022 | rendered = '{}(\n{})'.format(obj_name, ''.join( |
|
1021 | rendered = '{}(\n{})'.format(obj_name, ''.join( | |
1023 | ' {},\n'.format(r) for r in result) |
|
1022 | ' {},\n'.format(r) for r in result) | |
1024 | ) |
|
1023 | ) | |
1025 | else: |
|
1024 | else: | |
1026 | rendered = '{}({})'.format(obj_name, ', '.join(result)) |
|
1025 | rendered = '{}({})'.format(obj_name, ', '.join(result)) | |
1027 |
|
1026 | |||
1028 | if obj_signature.return_annotation is not inspect._empty: |
|
1027 | if obj_signature.return_annotation is not inspect._empty: | |
1029 | anno = inspect.formatannotation(obj_signature.return_annotation) |
|
1028 | anno = inspect.formatannotation(obj_signature.return_annotation) | |
1030 | rendered += ' -> {}'.format(anno) |
|
1029 | rendered += ' -> {}'.format(anno) | |
1031 |
|
1030 | |||
1032 | return rendered |
|
1031 | return rendered |
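A short check of the helper above, assuming ``_render_signature`` is available in the current scope; signatures that would exceed roughly 75 characters are instead wrapped one parameter per line::

    import inspect

    def f(alpha, beta=2, *args, gamma, **kwargs) -> int:
        return 0

    print(_render_signature(inspect.signature(f), 'f'))
    # f(alpha, beta=2, *args, gamma, **kwargs) -> int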
@@ -1,347 +1,341 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | """ |
|
2 | """ | |
3 | Provides a reload() function that acts recursively. |
|
3 | Provides a reload() function that acts recursively. | |
4 |
|
4 | |||
5 | Python's normal :func:`python:reload` function only reloads the module that it's |
|
5 | Python's normal :func:`python:reload` function only reloads the module that it's | |
6 | passed. The :func:`reload` function in this module also reloads everything |
|
6 | passed. The :func:`reload` function in this module also reloads everything | |
7 | imported from that module, which is useful when you're changing files deep |
|
7 | imported from that module, which is useful when you're changing files deep | |
8 | inside a package. |
|
8 | inside a package. | |
9 |
|
9 | |||
10 | To use this as your default reload function, type this |
|
10 | To use this as your default reload function, type this:: | |
11 |
12 | import __builtin__ |
13 | from IPython.lib import deepreload |
14 | __builtin__.reload = deepreload.reload |
15 |
16 | Or this for Python 3:: |
17 |
|
11 | |||
18 | import builtins |
|
12 | import builtins | |
19 | from IPython.lib import deepreload |
|
13 | from IPython.lib import deepreload | |
20 | builtins.reload = deepreload.reload |
|
14 | builtins.reload = deepreload.reload | |
21 |
|
15 | |||
22 | A reference to the original :func:`python:reload` is stored in this module as |
|
16 | A reference to the original :func:`python:reload` is stored in this module as | |
23 | :data:`original_reload`, so you can restore it later. |
|
17 | :data:`original_reload`, so you can restore it later. | |
24 |
|
18 | |||
25 | This code is almost entirely based on knee.py, which is a Python |
|
19 | This code is almost entirely based on knee.py, which is a Python | |
26 | re-implementation of hierarchical module import. |
|
20 | re-implementation of hierarchical module import. | |
27 | """ |
|
21 | """ | |
28 | #***************************************************************************** |
|
22 | #***************************************************************************** | |
29 | # Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu> |
|
23 | # Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu> | |
30 | # |
|
24 | # | |
31 | # Distributed under the terms of the BSD License. The full license is in |
|
25 | # Distributed under the terms of the BSD License. The full license is in | |
32 | # the file COPYING, distributed as part of this software. |
|
26 | # the file COPYING, distributed as part of this software. | |
33 | #***************************************************************************** |
|
27 | #***************************************************************************** | |
34 |
|
28 | |||
35 | import builtins as builtin_mod |
|
29 | import builtins as builtin_mod | |
36 | from contextlib import contextmanager |
|
30 | from contextlib import contextmanager | |
37 | import imp |
|
31 | import imp | |
38 | import sys |
|
32 | import sys | |
39 |
|
33 | |||
40 | from types import ModuleType |
|
34 | from types import ModuleType | |
41 | from warnings import warn |
|
35 | from warnings import warn | |
42 | import types |
|
36 | import types | |
43 |
|
37 | |||
44 | original_import = builtin_mod.__import__ |
|
38 | original_import = builtin_mod.__import__ | |
45 |
|
39 | |||
46 | @contextmanager |
|
40 | @contextmanager | |
47 | def replace_import_hook(new_import): |
|
41 | def replace_import_hook(new_import): | |
48 | saved_import = builtin_mod.__import__ |
|
42 | saved_import = builtin_mod.__import__ | |
49 | builtin_mod.__import__ = new_import |
|
43 | builtin_mod.__import__ = new_import | |
50 | try: |
|
44 | try: | |
51 | yield |
|
45 | yield | |
52 | finally: |
|
46 | finally: | |
53 | builtin_mod.__import__ = saved_import |
|
47 | builtin_mod.__import__ = saved_import | |
54 |
|
48 | |||
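The context manager above is how this module temporarily takes over ``builtins.__import__`` and guarantees the previous hook is restored. A minimal sketch with an illustrative tracing hook::

    def tracing_import(name, *args, **kwargs):
        # Log every import triggered while the hook is installed,
        # then defer to the import function saved at module load time.
        print('importing', name)
        return original_import(name, *args, **kwargs)

    with replace_import_hook(tracing_import):
        import json   # resolved through tracing_import
    # On exit the previously installed __import__ is back in place.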
55 | def get_parent(globals, level): |
|
49 | def get_parent(globals, level): | |
56 | """ |
|
50 | """ | |
57 | parent, name = get_parent(globals, level) |
|
51 | parent, name = get_parent(globals, level) | |
58 |
|
52 | |||
59 | Return the package that an import is being performed in. If globals comes |
|
53 | Return the package that an import is being performed in. If globals comes | |
60 | from the module foo.bar.bat (not itself a package), this returns the |
|
54 | from the module foo.bar.bat (not itself a package), this returns the | |
61 | sys.modules entry for foo.bar. If globals is from a package's __init__.py, |
|
55 | sys.modules entry for foo.bar. If globals is from a package's __init__.py, | |
62 | the package's entry in sys.modules is returned. |
|
56 | the package's entry in sys.modules is returned. | |
63 |
|
57 | |||
64 | If globals doesn't come from a package or a module in a package, or a |
|
58 | If globals doesn't come from a package or a module in a package, or a | |
65 | corresponding entry is not found in sys.modules, None is returned. |
|
59 | corresponding entry is not found in sys.modules, None is returned. | |
66 | """ |
|
60 | """ | |
67 | orig_level = level |
|
61 | orig_level = level | |
68 |
|
62 | |||
69 | if not level or not isinstance(globals, dict): |
|
63 | if not level or not isinstance(globals, dict): | |
70 | return None, '' |
|
64 | return None, '' | |
71 |
|
65 | |||
72 | pkgname = globals.get('__package__', None) |
|
66 | pkgname = globals.get('__package__', None) | |
73 |
|
67 | |||
74 | if pkgname is not None: |
|
68 | if pkgname is not None: | |
75 | # __package__ is set, so use it |
|
69 | # __package__ is set, so use it | |
76 | if not hasattr(pkgname, 'rindex'): |
|
70 | if not hasattr(pkgname, 'rindex'): | |
77 | raise ValueError('__package__ set to non-string') |
|
71 | raise ValueError('__package__ set to non-string') | |
78 | if len(pkgname) == 0: |
|
72 | if len(pkgname) == 0: | |
79 | if level > 0: |
|
73 | if level > 0: | |
80 | raise ValueError('Attempted relative import in non-package') |
|
74 | raise ValueError('Attempted relative import in non-package') | |
81 | return None, '' |
|
75 | return None, '' | |
82 | name = pkgname |
|
76 | name = pkgname | |
83 | else: |
|
77 | else: | |
84 | # __package__ not set, so figure it out and set it |
|
78 | # __package__ not set, so figure it out and set it | |
85 | if '__name__' not in globals: |
|
79 | if '__name__' not in globals: | |
86 | return None, '' |
|
80 | return None, '' | |
87 | modname = globals['__name__'] |
|
81 | modname = globals['__name__'] | |
88 |
|
82 | |||
89 | if '__path__' in globals: |
|
83 | if '__path__' in globals: | |
90 | # __path__ is set, so modname is already the package name |
|
84 | # __path__ is set, so modname is already the package name | |
91 | globals['__package__'] = name = modname |
|
85 | globals['__package__'] = name = modname | |
92 | else: |
|
86 | else: | |
93 | # Normal module, so work out the package name if any |
|
87 | # Normal module, so work out the package name if any | |
94 | lastdot = modname.rfind('.') |
|
88 | lastdot = modname.rfind('.') | |
95 | if lastdot < 0 < level: |
|
89 | if lastdot < 0 < level: | |
96 | raise ValueError("Attempted relative import in non-package") |
|
90 | raise ValueError("Attempted relative import in non-package") | |
97 | if lastdot < 0: |
|
91 | if lastdot < 0: | |
98 | globals['__package__'] = None |
|
92 | globals['__package__'] = None | |
99 | return None, '' |
|
93 | return None, '' | |
100 | globals['__package__'] = name = modname[:lastdot] |
|
94 | globals['__package__'] = name = modname[:lastdot] | |
101 |
|
95 | |||
102 | dot = len(name) |
|
96 | dot = len(name) | |
103 | for x in range(level, 1, -1): |
|
97 | for x in range(level, 1, -1): | |
104 | try: |
|
98 | try: | |
105 | dot = name.rindex('.', 0, dot) |
|
99 | dot = name.rindex('.', 0, dot) | |
106 | except ValueError: |
|
100 | except ValueError: | |
107 | raise ValueError("attempted relative import beyond top-level " |
|
101 | raise ValueError("attempted relative import beyond top-level " | |
108 | "package") |
|
102 | "package") | |
109 | name = name[:dot] |
|
103 | name = name[:dot] | |
110 |
|
104 | |||
111 | try: |
|
105 | try: | |
112 | parent = sys.modules[name] |
|
106 | parent = sys.modules[name] | |
113 | except: |
|
107 | except: | |
114 | if orig_level < 1: |
|
108 | if orig_level < 1: | |
115 | warn("Parent module '%.200s' not found while handling absolute " |
|
109 | warn("Parent module '%.200s' not found while handling absolute " | |
116 | "import" % name) |
|
110 | "import" % name) | |
117 | parent = None |
|
111 | parent = None | |
118 | else: |
|
112 | else: | |
119 | raise SystemError("Parent module '%.200s' not loaded, cannot " |
|
113 | raise SystemError("Parent module '%.200s' not loaded, cannot " | |
120 | "perform relative import" % name) |
|
114 | "perform relative import" % name) | |
121 |
|
115 | |||
122 | # We expect, but can't guarantee, if parent != None, that: |
|
116 | # We expect, but can't guarantee, if parent != None, that: | |
123 | # - parent.__name__ == name |
|
117 | # - parent.__name__ == name | |
124 | # - parent.__dict__ is globals |
|
118 | # - parent.__dict__ is globals | |
125 | # If this is violated... Who cares? |
|
119 | # If this is violated... Who cares? | |
126 | return parent, name |
|
120 | return parent, name | |
127 |
|
121 | |||
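To see what ``get_parent`` resolves, one can hand it the globals of any module living inside a package; stdlib ``email.mime.text`` is used here purely as an example::

    import sys
    import email.mime.text as m

    # Simulate a one-dot relative import (level=1) performed inside email.mime.text.
    parent, name = get_parent(vars(m), 1)
    print(name)                                  # 'email.mime'
    print(parent is sys.modules['email.mime'])   # True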
128 | def load_next(mod, altmod, name, buf): |
|
122 | def load_next(mod, altmod, name, buf): | |
129 | """ |
|
123 | """ | |
130 | mod, name, buf = load_next(mod, altmod, name, buf) |
|
124 | mod, name, buf = load_next(mod, altmod, name, buf) | |
131 |
|
125 | |||
132 | altmod is either None or same as mod |
|
126 | altmod is either None or same as mod | |
133 | """ |
|
127 | """ | |
134 |
|
128 | |||
135 | if len(name) == 0: |
|
129 | if len(name) == 0: | |
136 | # completely empty module name should only happen in |
|
130 | # completely empty module name should only happen in | |
137 | # 'from . import' (or '__import__("")') |
|
131 | # 'from . import' (or '__import__("")') | |
138 | return mod, None, buf |
|
132 | return mod, None, buf | |
139 |
|
133 | |||
140 | dot = name.find('.') |
|
134 | dot = name.find('.') | |
141 | if dot == 0: |
|
135 | if dot == 0: | |
142 | raise ValueError('Empty module name') |
|
136 | raise ValueError('Empty module name') | |
143 |
|
137 | |||
144 | if dot < 0: |
|
138 | if dot < 0: | |
145 | subname = name |
|
139 | subname = name | |
146 | next = None |
|
140 | next = None | |
147 | else: |
|
141 | else: | |
148 | subname = name[:dot] |
|
142 | subname = name[:dot] | |
149 | next = name[dot+1:] |
|
143 | next = name[dot+1:] | |
150 |
|
144 | |||
151 | if buf != '': |
|
145 | if buf != '': | |
152 | buf += '.' |
|
146 | buf += '.' | |
153 | buf += subname |
|
147 | buf += subname | |
154 |
|
148 | |||
155 | result = import_submodule(mod, subname, buf) |
|
149 | result = import_submodule(mod, subname, buf) | |
156 | if result is None and mod != altmod: |
|
150 | if result is None and mod != altmod: | |
157 | result = import_submodule(altmod, subname, subname) |
|
151 | result = import_submodule(altmod, subname, subname) | |
158 | if result is not None: |
|
152 | if result is not None: | |
159 | buf = subname |
|
153 | buf = subname | |
160 |
|
154 | |||
161 | if result is None: |
|
155 | if result is None: | |
162 | raise ImportError("No module named %.200s" % name) |
|
156 | raise ImportError("No module named %.200s" % name) | |
163 |
|
157 | |||
164 | return result, next, buf |
|
158 | return result, next, buf | |
165 |
|
159 | |||
166 |
|
160 | |||
167 | # Need to keep track of what we've already reloaded to prevent cyclic evil |
|
161 | # Need to keep track of what we've already reloaded to prevent cyclic evil | |
168 | found_now = {} |
|
162 | found_now = {} | |
169 |
|
163 | |||
170 | def import_submodule(mod, subname, fullname): |
|
164 | def import_submodule(mod, subname, fullname): | |
171 | """m = import_submodule(mod, subname, fullname)""" |
|
165 | """m = import_submodule(mod, subname, fullname)""" | |
172 | # Require: |
|
166 | # Require: | |
173 | # if mod == None: subname == fullname |
|
167 | # if mod == None: subname == fullname | |
174 | # else: mod.__name__ + "." + subname == fullname |
|
168 | # else: mod.__name__ + "." + subname == fullname | |
175 |
|
169 | |||
176 | global found_now |
|
170 | global found_now | |
177 | if fullname in found_now and fullname in sys.modules: |
|
171 | if fullname in found_now and fullname in sys.modules: | |
178 | m = sys.modules[fullname] |
|
172 | m = sys.modules[fullname] | |
179 | else: |
|
173 | else: | |
180 | print('Reloading', fullname) |
|
174 | print('Reloading', fullname) | |
181 | found_now[fullname] = 1 |
|
175 | found_now[fullname] = 1 | |
182 | oldm = sys.modules.get(fullname, None) |
|
176 | oldm = sys.modules.get(fullname, None) | |
183 |
|
177 | |||
184 | if mod is None: |
|
178 | if mod is None: | |
185 | path = None |
|
179 | path = None | |
186 | elif hasattr(mod, '__path__'): |
|
180 | elif hasattr(mod, '__path__'): | |
187 | path = mod.__path__ |
|
181 | path = mod.__path__ | |
188 | else: |
|
182 | else: | |
189 | return None |
|
183 | return None | |
190 |
|
184 | |||
191 | try: |
|
185 | try: | |
192 | # This appears to be necessary on Python 3, because imp.find_module() |
|
186 | # This appears to be necessary on Python 3, because imp.find_module() | |
193 | # tries to import standard libraries (like io) itself, and we don't |
|
187 | # tries to import standard libraries (like io) itself, and we don't | |
194 | # want them to be processed by our deep_import_hook. |
|
188 | # want them to be processed by our deep_import_hook. | |
195 | with replace_import_hook(original_import): |
|
189 | with replace_import_hook(original_import): | |
196 | fp, filename, stuff = imp.find_module(subname, path) |
|
190 | fp, filename, stuff = imp.find_module(subname, path) | |
197 | except ImportError: |
|
191 | except ImportError: | |
198 | return None |
|
192 | return None | |
199 |
|
193 | |||
200 | try: |
|
194 | try: | |
201 | m = imp.load_module(fullname, fp, filename, stuff) |
|
195 | m = imp.load_module(fullname, fp, filename, stuff) | |
202 | except: |
|
196 | except: | |
203 | # load_module probably removed name from modules because of |
|
197 | # load_module probably removed name from modules because of | |
204 | # the error. Put back the original module object. |
|
198 | # the error. Put back the original module object. | |
205 | if oldm: |
|
199 | if oldm: | |
206 | sys.modules[fullname] = oldm |
|
200 | sys.modules[fullname] = oldm | |
207 | raise |
|
201 | raise | |
208 | finally: |
|
202 | finally: | |
209 | if fp: fp.close() |
|
203 | if fp: fp.close() | |
210 |
|
204 | |||
211 | add_submodule(mod, m, fullname, subname) |
|
205 | add_submodule(mod, m, fullname, subname) | |
212 |
|
206 | |||
213 | return m |
|
207 | return m | |
214 |
|
208 | |||
215 | def add_submodule(mod, submod, fullname, subname): |
|
209 | def add_submodule(mod, submod, fullname, subname): | |
216 | """mod.{subname} = submod""" |
|
210 | """mod.{subname} = submod""" | |
217 | if mod is None: |
|
211 | if mod is None: | |
218 | return #Nothing to do here. |
|
212 | return #Nothing to do here. | |
219 |
|
213 | |||
220 | if submod is None: |
|
214 | if submod is None: | |
221 | submod = sys.modules[fullname] |
|
215 | submod = sys.modules[fullname] | |
222 |
|
216 | |||
223 | setattr(mod, subname, submod) |
|
217 | setattr(mod, subname, submod) | |
224 |
|
218 | |||
225 | return |
|
219 | return | |
226 |
|
220 | |||
227 | def ensure_fromlist(mod, fromlist, buf, recursive): |
|
221 | def ensure_fromlist(mod, fromlist, buf, recursive): | |
228 | """Handle 'from module import a, b, c' imports.""" |
|
222 | """Handle 'from module import a, b, c' imports.""" | |
229 | if not hasattr(mod, '__path__'): |
|
223 | if not hasattr(mod, '__path__'): | |
230 | return |
|
224 | return | |
231 | for item in fromlist: |
|
225 | for item in fromlist: | |
232 | if not hasattr(item, 'rindex'): |
|
226 | if not hasattr(item, 'rindex'): | |
233 | raise TypeError("Item in ``from list'' not a string") |
|
227 | raise TypeError("Item in ``from list'' not a string") | |
234 | if item == '*': |
|
228 | if item == '*': | |
235 | if recursive: |
|
229 | if recursive: | |
236 | continue # avoid endless recursion |
|
230 | continue # avoid endless recursion | |
237 | try: |
|
231 | try: | |
238 | all = mod.__all__ |
|
232 | all = mod.__all__ | |
239 | except AttributeError: |
|
233 | except AttributeError: | |
240 | pass |
|
234 | pass | |
241 | else: |
|
235 | else: | |
242 | ret = ensure_fromlist(mod, all, buf, 1) |
|
236 | ret = ensure_fromlist(mod, all, buf, 1) | |
243 | if not ret: |
|
237 | if not ret: | |
244 | return 0 |
|
238 | return 0 | |
245 | elif not hasattr(mod, item): |
|
239 | elif not hasattr(mod, item): | |
246 | import_submodule(mod, item, buf + '.' + item) |
|
240 | import_submodule(mod, item, buf + '.' + item) | |
247 |
|
241 | |||
248 | def deep_import_hook(name, globals=None, locals=None, fromlist=None, level=-1): |
|
242 | def deep_import_hook(name, globals=None, locals=None, fromlist=None, level=-1): | |
249 | """Replacement for __import__()""" |
|
243 | """Replacement for __import__()""" | |
250 | parent, buf = get_parent(globals, level) |
|
244 | parent, buf = get_parent(globals, level) | |
251 |
|
245 | |||
252 | head, name, buf = load_next(parent, None if level < 0 else parent, name, buf) |
|
246 | head, name, buf = load_next(parent, None if level < 0 else parent, name, buf) | |
253 |
|
247 | |||
254 | tail = head |
|
248 | tail = head | |
255 | while name: |
|
249 | while name: | |
256 | tail, name, buf = load_next(tail, tail, name, buf) |
|
250 | tail, name, buf = load_next(tail, tail, name, buf) | |
257 |
|
251 | |||
258 | # If tail is None, both get_parent and load_next found |
|
252 | # If tail is None, both get_parent and load_next found | |
259 | # an empty module name: someone called __import__("") or |
|
253 | # an empty module name: someone called __import__("") or | |
260 | # doctored faulty bytecode |
|
254 | # doctored faulty bytecode | |
261 | if tail is None: |
|
255 | if tail is None: | |
262 | raise ValueError('Empty module name') |
|
256 | raise ValueError('Empty module name') | |
263 |
|
257 | |||
264 | if not fromlist: |
|
258 | if not fromlist: | |
265 | return head |
|
259 | return head | |
266 |
|
260 | |||
267 | ensure_fromlist(tail, fromlist, buf, 0) |
|
261 | ensure_fromlist(tail, fromlist, buf, 0) | |
268 | return tail |
|
262 | return tail | |
269 |
|
263 | |||
270 | modules_reloading = {} |
|
264 | modules_reloading = {} | |
271 |
|
265 | |||
272 | def deep_reload_hook(m): |
|
266 | def deep_reload_hook(m): | |
273 | """Replacement for reload().""" |
|
267 | """Replacement for reload().""" | |
274 | # Hardcode this one as it would raise a NotImplementedError from the |
|
268 | # Hardcode this one as it would raise a NotImplementedError from the | |
275 | # bowels of Python and screw up the import machinery after. |
|
269 | # bowels of Python and screw up the import machinery after. | |
276 | # unlike other imports the `exclude` list already in place is not enough. |
|
270 | # unlike other imports the `exclude` list already in place is not enough. | |
277 |
|
271 | |||
278 | if m is types: |
|
272 | if m is types: | |
279 | return m |
|
273 | return m | |
280 | if not isinstance(m, ModuleType): |
|
274 | if not isinstance(m, ModuleType): | |
281 | raise TypeError("reload() argument must be module") |
|
275 | raise TypeError("reload() argument must be module") | |
282 |
|
276 | |||
283 | name = m.__name__ |
|
277 | name = m.__name__ | |
284 |
|
278 | |||
285 | if name not in sys.modules: |
|
279 | if name not in sys.modules: | |
286 | raise ImportError("reload(): module %.200s not in sys.modules" % name) |
|
280 | raise ImportError("reload(): module %.200s not in sys.modules" % name) | |
287 |
|
281 | |||
288 | global modules_reloading |
|
282 | global modules_reloading | |
289 | try: |
|
283 | try: | |
290 | return modules_reloading[name] |
|
284 | return modules_reloading[name] | |
291 | except: |
|
285 | except: | |
292 | modules_reloading[name] = m |
|
286 | modules_reloading[name] = m | |
293 |
|
287 | |||
294 | dot = name.rfind('.') |
|
288 | dot = name.rfind('.') | |
295 | if dot < 0: |
|
289 | if dot < 0: | |
296 | subname = name |
|
290 | subname = name | |
297 | path = None |
|
291 | path = None | |
298 | else: |
|
292 | else: | |
299 | try: |
|
293 | try: | |
300 | parent = sys.modules[name[:dot]] |
|
294 | parent = sys.modules[name[:dot]] | |
301 | except KeyError: |
|
295 | except KeyError: | |
302 | modules_reloading.clear() |
|
296 | modules_reloading.clear() | |
303 | raise ImportError("reload(): parent %.200s not in sys.modules" % name[:dot]) |
|
297 | raise ImportError("reload(): parent %.200s not in sys.modules" % name[:dot]) | |
304 | subname = name[dot+1:] |
|
298 | subname = name[dot+1:] | |
305 | path = getattr(parent, "__path__", None) |
|
299 | path = getattr(parent, "__path__", None) | |
306 |
|
300 | |||
307 | try: |
|
301 | try: | |
308 | # This appears to be necessary on Python 3, because imp.find_module() |
|
302 | # This appears to be necessary on Python 3, because imp.find_module() | |
309 | # tries to import standard libraries (like io) itself, and we don't |
|
303 | # tries to import standard libraries (like io) itself, and we don't | |
310 | # want them to be processed by our deep_import_hook. |
|
304 | # want them to be processed by our deep_import_hook. | |
311 | with replace_import_hook(original_import): |
|
305 | with replace_import_hook(original_import): | |
312 | fp, filename, stuff = imp.find_module(subname, path) |
|
306 | fp, filename, stuff = imp.find_module(subname, path) | |
313 | finally: |
|
307 | finally: | |
314 | modules_reloading.clear() |
|
308 | modules_reloading.clear() | |
315 |
|
309 | |||
316 | try: |
|
310 | try: | |
317 | newm = imp.load_module(name, fp, filename, stuff) |
|
311 | newm = imp.load_module(name, fp, filename, stuff) | |
318 | except: |
|
312 | except: | |
319 | # load_module probably removed name from modules because of |
|
313 | # load_module probably removed name from modules because of | |
320 | # the error. Put back the original module object. |
|
314 | # the error. Put back the original module object. | |
321 | sys.modules[name] = m |
|
315 | sys.modules[name] = m | |
322 | raise |
|
316 | raise | |
323 | finally: |
|
317 | finally: | |
324 | if fp: fp.close() |
|
318 | if fp: fp.close() | |
325 |
|
319 | |||
326 | modules_reloading.clear() |
|
320 | modules_reloading.clear() | |
327 | return newm |
|
321 | return newm | |
328 |
|
322 | |||
329 | # Save the original hooks |
|
323 | # Save the original hooks | |
330 | original_reload = imp.reload |
|
324 | original_reload = imp.reload | |
331 |
|
325 | |||
332 | # Replacement for reload() |
|
326 | # Replacement for reload() | |
333 | def reload(module, exclude=('sys', 'os.path', 'builtins', '__main__', |
|
327 | def reload(module, exclude=('sys', 'os.path', 'builtins', '__main__', | |
334 | 'numpy', 'numpy._globals')): |
|
328 | 'numpy', 'numpy._globals')): | |
335 | """Recursively reload all modules used in the given module. Optionally |
|
329 | """Recursively reload all modules used in the given module. Optionally | |
336 | takes a list of modules to exclude from reloading. The default exclude |
|
330 | takes a list of modules to exclude from reloading. The default exclude | |
337 | list contains sys, __main__, and builtins, to prevent, e.g., resetting |
|
331 | list contains sys, __main__, and builtins, to prevent, e.g., resetting | |
338 | display, exception, and io hooks. |
|
332 | display, exception, and io hooks. | |
339 | """ |
|
333 | """ | |
340 | global found_now |
|
334 | global found_now | |
341 | for i in exclude: |
|
335 | for i in exclude: | |
342 | found_now[i] = 1 |
|
336 | found_now[i] = 1 | |
343 | try: |
|
337 | try: | |
344 | with replace_import_hook(deep_import_hook): |
|
338 | with replace_import_hook(deep_import_hook): | |
345 | return deep_reload_hook(module) |
|
339 | return deep_reload_hook(module) | |
346 | finally: |
|
340 | finally: | |
347 | found_now = {} |
|
341 | found_now = {} |
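Typical direct use of the function above, for some package whose files changed on disk (``mypkg`` is a placeholder name)::

    from IPython.lib import deepreload
    import mypkg   # hypothetical package being developed

    # Recursively re-import mypkg and everything it imported, keeping the
    # default exclusions (sys, builtins, numpy, ...), and rebind the name.
    mypkg = deepreload.reload(mypkg)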
@@ -1,532 +1,532 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | """ |
|
2 | """ | |
3 | Defines a variety of Pygments lexers for highlighting IPython code. |
|
3 | Defines a variety of Pygments lexers for highlighting IPython code. | |
4 |
|
4 | |||
5 | This includes: |
|
5 | This includes: | |
6 |
|
6 | |||
7 | IPythonLexer, IPython3Lexer |
|
7 | IPythonLexer, IPython3Lexer | |
8 | Lexers for pure IPython (python + magic/shell commands) |
|
8 | Lexers for pure IPython (python + magic/shell commands) | |
9 |
|
9 | |||
10 | IPythonPartialTracebackLexer, IPythonTracebackLexer |
|
10 | IPythonPartialTracebackLexer, IPythonTracebackLexer | |
11 | Supports 2.x and 3.x via keyword `python3`. The partial traceback |
|
11 | Supports 2.x and 3.x via keyword `python3`. The partial traceback | |
12 | lexer reads everything but the Python code appearing in a traceback. |
|
12 | lexer reads everything but the Python code appearing in a traceback. | |
13 | The full lexer combines the partial lexer with an IPython lexer. |
|
13 | The full lexer combines the partial lexer with an IPython lexer. | |
14 |
|
14 | |||
15 | IPythonConsoleLexer |
|
15 | IPythonConsoleLexer | |
16 | A lexer for IPython console sessions, with support for tracebacks. |
|
16 | A lexer for IPython console sessions, with support for tracebacks. | |
17 |
|
17 | |||
18 | IPyLexer |
|
18 | IPyLexer | |
19 | A friendly lexer which examines the first line of text and from it, |
|
19 | A friendly lexer which examines the first line of text and from it, | |
20 | decides whether to use an IPython lexer or an IPython console lexer. |
|
20 | decides whether to use an IPython lexer or an IPython console lexer. | |
21 | This is probably the only lexer that needs to be explicitly added |
|
21 | This is probably the only lexer that needs to be explicitly added | |
22 | to Pygments. |
|
22 | to Pygments. | |
23 |
|
23 | |||
24 | """ |
|
24 | """ | |
25 | #----------------------------------------------------------------------------- |
|
25 | #----------------------------------------------------------------------------- | |
26 | # Copyright (c) 2013, the IPython Development Team. |
|
26 | # Copyright (c) 2013, the IPython Development Team. | |
27 | # |
|
27 | # | |
28 | # Distributed under the terms of the Modified BSD License. |
|
28 | # Distributed under the terms of the Modified BSD License. | |
29 | # |
|
29 | # | |
30 | # The full license is in the file COPYING.txt, distributed with this software. |
|
30 | # The full license is in the file COPYING.txt, distributed with this software. | |
31 | #----------------------------------------------------------------------------- |
|
31 | #----------------------------------------------------------------------------- | |
32 |
|
32 | |||
33 | # Standard library |
|
33 | # Standard library | |
34 | import re |
|
34 | import re | |
35 |
|
35 | |||
36 | # Third party |
|
36 | # Third party | |
37 | from pygments.lexers import ( |
|
37 | from pygments.lexers import ( | |
38 | BashLexer, HtmlLexer, JavascriptLexer, RubyLexer, PerlLexer, PythonLexer, |
|
38 | BashLexer, HtmlLexer, JavascriptLexer, RubyLexer, PerlLexer, PythonLexer, | |
39 | Python3Lexer, TexLexer) |
|
39 | Python3Lexer, TexLexer) | |
40 | from pygments.lexer import ( |
|
40 | from pygments.lexer import ( | |
41 | Lexer, DelegatingLexer, RegexLexer, do_insertions, bygroups, using, |
|
41 | Lexer, DelegatingLexer, RegexLexer, do_insertions, bygroups, using, | |
42 | ) |
|
42 | ) | |
43 | from pygments.token import ( |
|
43 | from pygments.token import ( | |
44 | Generic, Keyword, Literal, Name, Operator, Other, Text, Error, |
|
44 | Generic, Keyword, Literal, Name, Operator, Other, Text, Error, | |
45 | ) |
|
45 | ) | |
46 | from pygments.util import get_bool_opt |
|
46 | from pygments.util import get_bool_opt | |
47 |
|
47 | |||
48 | # Local |
|
48 | # Local | |
49 |
|
49 | |||
50 | line_re = re.compile('.*?\n') |
|
50 | line_re = re.compile('.*?\n') | |
51 |
|
51 | |||
52 | __all__ = ['build_ipy_lexer', 'IPython3Lexer', 'IPythonLexer', |
|
52 | __all__ = ['build_ipy_lexer', 'IPython3Lexer', 'IPythonLexer', | |
53 | 'IPythonPartialTracebackLexer', 'IPythonTracebackLexer', |
|
53 | 'IPythonPartialTracebackLexer', 'IPythonTracebackLexer', | |
54 | 'IPythonConsoleLexer', 'IPyLexer'] |
|
54 | 'IPythonConsoleLexer', 'IPyLexer'] | |
55 |
|
55 | |||
56 |
|
56 | |||
57 | def build_ipy_lexer(python3): |
|
57 | def build_ipy_lexer(python3): | |
58 | """Builds IPython lexers depending on the value of `python3`. |
|
58 | """Builds IPython lexers depending on the value of `python3`. | |
59 |
|
59 | |||
60 | The lexer inherits from an appropriate Python lexer and then adds |
|
60 | The lexer inherits from an appropriate Python lexer and then adds | |
61 | information about IPython-specific keywords (e.g. magic commands, |
|
61 | information about IPython-specific keywords (e.g. magic commands, | |
62 | shell commands, etc.) |
|
62 | shell commands, etc.) | |
63 |
|
63 | |||
64 | Parameters |
|
64 | Parameters | |
65 | ---------- |
|
65 | ---------- | |
66 | python3 : bool |
|
66 | python3 : bool | |
67 | If `True`, then build an IPython lexer from a Python 3 lexer. |
|
67 | If `True`, then build an IPython lexer from a Python 3 lexer. | |
68 |
|
68 | |||
69 | """ |
|
69 | """ | |
70 | # It would be nice to have a single IPython lexer class which takes |
|
70 | # It would be nice to have a single IPython lexer class which takes | |
71 | # a boolean `python3`. But since there are two Python lexer classes, |
|
71 | # a boolean `python3`. But since there are two Python lexer classes, | |
72 | # we will also have two IPython lexer classes. |
|
72 | # we will also have two IPython lexer classes. | |
73 | if python3: |
|
73 | if python3: | |
74 | PyLexer = Python3Lexer |
|
74 | PyLexer = Python3Lexer | |
75 | name = 'IPython3' |
|
75 | name = 'IPython3' | |
76 | aliases = ['ipython3'] |
|
76 | aliases = ['ipython3'] | |
77 | doc = """IPython3 Lexer""" |
|
77 | doc = """IPython3 Lexer""" | |
78 | else: |
|
78 | else: | |
79 | PyLexer = PythonLexer |
|
79 | PyLexer = PythonLexer | |
80 | name = 'IPython' |
|
80 | name = 'IPython' | |
81 | aliases = ['ipython2', 'ipython'] |
|
81 | aliases = ['ipython2', 'ipython'] | |
82 | doc = """IPython Lexer""" |
|
82 | doc = """IPython Lexer""" | |
83 |
|
83 | |||
84 | ipython_tokens = [ |
|
84 | ipython_tokens = [ | |
85 | (r'(?s)(\s*)(%%capture)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
85 | (r'(?s)(\s*)(%%capture)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
86 | (r'(?s)(\s*)(%%debug)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
86 | (r'(?s)(\s*)(%%debug)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
87 | (r'(?is)(\s*)(%%html)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(HtmlLexer))), |
|
87 | (r'(?is)(\s*)(%%html)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(HtmlLexer))), | |
88 | (r'(?s)(\s*)(%%javascript)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(JavascriptLexer))), |
|
88 | (r'(?s)(\s*)(%%javascript)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(JavascriptLexer))), | |
89 | (r'(?s)(\s*)(%%js)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(JavascriptLexer))), |
|
89 | (r'(?s)(\s*)(%%js)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(JavascriptLexer))), | |
90 | (r'(?s)(\s*)(%%latex)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(TexLexer))), |
|
90 | (r'(?s)(\s*)(%%latex)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(TexLexer))), | |
91 | (r'(?s)(\s*)(%%perl)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PerlLexer))), |
|
91 | (r'(?s)(\s*)(%%perl)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PerlLexer))), | |
92 | (r'(?s)(\s*)(%%prun)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
92 | (r'(?s)(\s*)(%%prun)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
93 | (r'(?s)(\s*)(%%pypy)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
93 | (r'(?s)(\s*)(%%pypy)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
94 | (r'(?s)(\s*)(%%python)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
94 | (r'(?s)(\s*)(%%python)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
95 | (r'(?s)(\s*)(%%python2)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PythonLexer))), |
|
95 | (r'(?s)(\s*)(%%python2)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PythonLexer))), | |
96 | (r'(?s)(\s*)(%%python3)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(Python3Lexer))), |
|
96 | (r'(?s)(\s*)(%%python3)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(Python3Lexer))), | |
97 | (r'(?s)(\s*)(%%ruby)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(RubyLexer))), |
|
97 | (r'(?s)(\s*)(%%ruby)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(RubyLexer))), | |
98 | (r'(?s)(\s*)(%%time)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
98 | (r'(?s)(\s*)(%%time)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
99 | (r'(?s)(\s*)(%%timeit)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
99 | (r'(?s)(\s*)(%%timeit)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
100 | (r'(?s)(\s*)(%%writefile)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
100 | (r'(?s)(\s*)(%%writefile)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
101 | (r'(?s)(\s*)(%%file)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), |
|
101 | (r'(?s)(\s*)(%%file)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))), | |
102 | (r"(?s)(\s*)(%%)(\w+)(.*)", bygroups(Text, Operator, Keyword, Text)), |
|
102 | (r"(?s)(\s*)(%%)(\w+)(.*)", bygroups(Text, Operator, Keyword, Text)), | |
103 | (r'(?s)(^\s*)(%%!)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(BashLexer))), |
|
103 | (r'(?s)(^\s*)(%%!)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(BashLexer))), | |
104 | (r"(%%?)(\w+)(\?\??)$", bygroups(Operator, Keyword, Operator)), |
|
104 | (r"(%%?)(\w+)(\?\??)$", bygroups(Operator, Keyword, Operator)), | |
105 | (r"\b(\?\??)(\s*)$", bygroups(Operator, Text)), |
|
105 | (r"\b(\?\??)(\s*)$", bygroups(Operator, Text)), | |
106 | (r'(%)(sx|sc|system)(.*)(\n)', bygroups(Operator, Keyword, |
|
106 | (r'(%)(sx|sc|system)(.*)(\n)', bygroups(Operator, Keyword, | |
107 | using(BashLexer), Text)), |
|
107 | using(BashLexer), Text)), | |
108 | (r'(%)(\w+)(.*\n)', bygroups(Operator, Keyword, Text)), |
|
108 | (r'(%)(\w+)(.*\n)', bygroups(Operator, Keyword, Text)), | |
109 | (r'^(!!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)), |
|
109 | (r'^(!!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)), | |
110 | (r'(!)(?!=)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)), |
|
110 | (r'(!)(?!=)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)), | |
111 | (r'^(\s*)(\?\??)(\s*%{0,2}[\w\.\*]*)', bygroups(Text, Operator, Text)), |
|
111 | (r'^(\s*)(\?\??)(\s*%{0,2}[\w\.\*]*)', bygroups(Text, Operator, Text)), | |
112 | (r'(\s*%{0,2}[\w\.\*]*)(\?\??)(\s*)$', bygroups(Text, Operator, Text)), |
|
112 | (r'(\s*%{0,2}[\w\.\*]*)(\?\??)(\s*)$', bygroups(Text, Operator, Text)), | |
113 | ] |
|
113 | ] | |
114 |
|
114 | |||
115 | tokens = PyLexer.tokens.copy() |
|
115 | tokens = PyLexer.tokens.copy() | |
116 | tokens['root'] = ipython_tokens + tokens['root'] |
|
116 | tokens['root'] = ipython_tokens + tokens['root'] | |
117 |
|
117 | |||
118 | attrs = {'name': name, 'aliases': aliases, 'filenames': [], |
|
118 | attrs = {'name': name, 'aliases': aliases, 'filenames': [], | |
119 | '__doc__': doc, 'tokens': tokens} |
|
119 | '__doc__': doc, 'tokens': tokens} | |
120 |
|
120 | |||
121 | return type(name, (PyLexer,), attrs) |
|
121 | return type(name, (PyLexer,), attrs) | |
122 |
|
122 | |||
123 |
|
123 | |||
124 | IPython3Lexer = build_ipy_lexer(python3=True) |
|
124 | IPython3Lexer = build_ipy_lexer(python3=True) | |
125 | IPythonLexer = build_ipy_lexer(python3=False) |
|
125 | IPythonLexer = build_ipy_lexer(python3=False) | |
126 |
|
126 | |||
127 |
|
127 | |||
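A quick way to see what the generated lexer classes add on top of the plain Python lexer is to feed them a line of IPython syntax (assumes the classes defined above are importable in the current scope)::

    from pygments import lex

    for token, value in lex("%timeit sum(range(10))\n", IPython3Lexer()):
        print(token, repr(value))
    # Per the '(%)(\w+)(.*\n)' rule above, the leading '%' is tagged Operator,
    # 'timeit' is tagged Keyword, and the remainder of the line is Text.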
128 | class IPythonPartialTracebackLexer(RegexLexer): |
|
128 | class IPythonPartialTracebackLexer(RegexLexer): | |
129 | """ |
|
129 | """ | |
130 | Partial lexer for IPython tracebacks. |
|
130 | Partial lexer for IPython tracebacks. | |
131 |
|
131 | |||
132 | Handles all the non-python output. |
|
132 | Handles all the non-python output. | |
133 |
|
133 | |||
134 | """ |
|
134 | """ | |
135 | name = 'IPython Partial Traceback' |
|
135 | name = 'IPython Partial Traceback' | |
136 |
|
136 | |||
137 | tokens = { |
|
137 | tokens = { | |
138 | 'root': [ |
|
138 | 'root': [ | |
139 | # Tracebacks for syntax errors have a different style. |
|
139 | # Tracebacks for syntax errors have a different style. | |
140 | # For both types of tracebacks, we mark the first line with |
|
140 | # For both types of tracebacks, we mark the first line with | |
141 | # Generic.Traceback. For syntax errors, we mark the filename |
|
141 | # Generic.Traceback. For syntax errors, we mark the filename | |
142 | # as we mark the filenames for non-syntax tracebacks. |
|
142 | # as we mark the filenames for non-syntax tracebacks. | |
143 | # |
|
143 | # | |
144 | # These two regexps define how IPythonConsoleLexer finds a |
|
144 | # These two regexps define how IPythonConsoleLexer finds a | |
145 | # traceback. |
|
145 | # traceback. | |
146 | # |
|
146 | # | |
147 | ## Non-syntax traceback |
|
147 | ## Non-syntax traceback | |
148 | (r'^(\^C)?(-+\n)', bygroups(Error, Generic.Traceback)), |
|
148 | (r'^(\^C)?(-+\n)', bygroups(Error, Generic.Traceback)), | |
149 | ## Syntax traceback |
|
149 | ## Syntax traceback | |
150 | (r'^( File)(.*)(, line )(\d+\n)', |
|
150 | (r'^( File)(.*)(, line )(\d+\n)', | |
151 | bygroups(Generic.Traceback, Name.Namespace, |
|
151 | bygroups(Generic.Traceback, Name.Namespace, | |
152 | Generic.Traceback, Literal.Number.Integer)), |
|
152 | Generic.Traceback, Literal.Number.Integer)), | |
153 |
|
153 | |||
154 | # (Exception Identifier)(Whitespace)(Traceback Message) |
|
154 | # (Exception Identifier)(Whitespace)(Traceback Message) | |
155 | (r'(?u)(^[^\d\W]\w*)(\s*)(Traceback.*?\n)', |
|
155 | (r'(?u)(^[^\d\W]\w*)(\s*)(Traceback.*?\n)', | |
156 | bygroups(Name.Exception, Generic.Whitespace, Text)), |
|
156 | bygroups(Name.Exception, Generic.Whitespace, Text)), | |
157 | # (Module/Filename)(Text)(Callee)(Function Signature) |
|
157 | # (Module/Filename)(Text)(Callee)(Function Signature) | |
158 | # Better options for callee and function signature? |
|
158 | # Better options for callee and function signature? | |
159 | (r'(.*)( in )(.*)(\(.*\)\n)', |
|
159 | (r'(.*)( in )(.*)(\(.*\)\n)', | |
160 | bygroups(Name.Namespace, Text, Name.Entity, Name.Tag)), |
|
160 | bygroups(Name.Namespace, Text, Name.Entity, Name.Tag)), | |
161 | # Regular line: (Whitespace)(Line Number)(Python Code) |
|
161 | # Regular line: (Whitespace)(Line Number)(Python Code) | |
162 | (r'(\s*?)(\d+)(.*?\n)', |
|
162 | (r'(\s*?)(\d+)(.*?\n)', | |
163 | bygroups(Generic.Whitespace, Literal.Number.Integer, Other)), |
|
163 | bygroups(Generic.Whitespace, Literal.Number.Integer, Other)), | |
164 | # Emphasized line: (Arrow)(Line Number)(Python Code) |
|
164 | # Emphasized line: (Arrow)(Line Number)(Python Code) | |
165 | # Using Exception token so arrow color matches the Exception. |
|
165 | # Using Exception token so arrow color matches the Exception. | |
166 | (r'(-*>?\s?)(\d+)(.*?\n)', |
|
166 | (r'(-*>?\s?)(\d+)(.*?\n)', | |
167 | bygroups(Name.Exception, Literal.Number.Integer, Other)), |
|
167 | bygroups(Name.Exception, Literal.Number.Integer, Other)), | |
168 | # (Exception Identifier)(Message) |
|
168 | # (Exception Identifier)(Message) | |
169 | (r'(?u)(^[^\d\W]\w*)(:.*?\n)', |
|
169 | (r'(?u)(^[^\d\W]\w*)(:.*?\n)', | |
170 | bygroups(Name.Exception, Text)), |
|
170 | bygroups(Name.Exception, Text)), | |
171 | # Tag everything else as Other, will be handled later. |
|
171 | # Tag everything else as Other, will be handled later. | |
172 | (r'.*\n', Other), |
|
172 | (r'.*\n', Other), | |
173 | ], |
|
173 | ], | |
174 | } |
|
174 | } | |
175 |
|
175 | |||
176 |
|
176 | |||
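The token rules above only classify the non-Python parts of a traceback; everything else is tagged Other for a later pass. A minimal, hedged sketch of how those rules fire (assuming this file is importable as IPython.lib.lexers; the expected tokens are read off the regexes above, not captured output):

from IPython.lib.lexers import IPythonPartialTracebackLexer

tb_head = (
    '---------------------------------------------------------------------------\n'
    'ZeroDivisionError                         Traceback (most recent call last)\n'
)
for token, value in IPythonPartialTracebackLexer().get_tokens(tb_head):
    print(token, repr(value))
# Per the rules above, the hyphen line should come out as Generic.Traceback and
# 'ZeroDivisionError' as Name.Exception; anything unrecognized is tagged Other.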
177 | class IPythonTracebackLexer(DelegatingLexer): |
|
177 | class IPythonTracebackLexer(DelegatingLexer): | |
178 | """ |
|
178 | """ | |
179 | IPython traceback lexer. |
|
179 | IPython traceback lexer. | |
180 |
|
180 | |||
181 | For doctests, the tracebacks can be snipped as much as desired with the |
|
181 | For doctests, the tracebacks can be snipped as much as desired with the | |
182 | exception to the lines that designate a traceback. For non-syntax error |
|
182 | exception to the lines that designate a traceback. For non-syntax error | |
183 | tracebacks, this is the line of hyphens. For syntax error tracebacks, |
|
183 | tracebacks, this is the line of hyphens. For syntax error tracebacks, | |
184 | this is the line which lists the File and line number. |
|
184 | this is the line which lists the File and line number. | |
185 |
|
185 | |||
186 | """ |
|
186 | """ | |
187 | # The lexer inherits from DelegatingLexer. The "root" lexer is an |
|
187 | # The lexer inherits from DelegatingLexer. The "root" lexer is an | |
188 | # appropriate IPython lexer, which depends on the value of the boolean |
|
188 | # appropriate IPython lexer, which depends on the value of the boolean | |
189 | # `python3`. First, we parse with the partial IPython traceback lexer. |
|
189 | # `python3`. First, we parse with the partial IPython traceback lexer. | |
190 | # Then, any code marked with the "Other" token is delegated to the root |
|
190 | # Then, any code marked with the "Other" token is delegated to the root | |
191 | # lexer. |
|
191 | # lexer. | |
192 | # |
|
192 | # | |
193 | name = 'IPython Traceback' |
|
193 | name = 'IPython Traceback' | |
194 | aliases = ['ipythontb'] |
|
194 | aliases = ['ipythontb'] | |
195 |
|
195 | |||
196 | def __init__(self, **options): |
|
196 | def __init__(self, **options): | |
197 | self.python3 = get_bool_opt(options, 'python3', False) |
|
197 | self.python3 = get_bool_opt(options, 'python3', False) | |
198 | if self.python3: |
|
198 | if self.python3: | |
199 | self.aliases = ['ipython3tb'] |
|
199 | self.aliases = ['ipython3tb'] | |
200 | else: |
|
200 | else: | |
201 | self.aliases = ['ipython2tb', 'ipythontb'] |
|
201 | self.aliases = ['ipython2tb', 'ipythontb'] | |
202 |
|
202 | |||
203 | if self.python3: |
|
203 | if self.python3: | |
204 | IPyLexer = IPython3Lexer |
|
204 | IPyLexer = IPython3Lexer | |
205 | else: |
|
205 | else: | |
206 | IPyLexer = IPythonLexer |
|
206 | IPyLexer = IPythonLexer | |
207 |
|
207 | |||
208 | DelegatingLexer.__init__(self, IPyLexer, |
|
208 | DelegatingLexer.__init__(self, IPyLexer, | |
209 | IPythonPartialTracebackLexer, **options) |
|
209 | IPythonPartialTracebackLexer, **options) | |
210 |
|
210 | |||
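Because this lexer delegates Other-tagged text back to an IPython lexer, it can be used directly with pygments' highlight(). A hedged usage sketch (import path assumed; any pygments formatter works):

from pygments import highlight
from pygments.formatters import TerminalFormatter
from IPython.lib.lexers import IPythonTracebackLexer

tb = (
    '---------------------------------------------------------------------------\n'
    'ZeroDivisionError                         Traceback (most recent call last)\n'
    '<ipython-input-1-bc757c3fda29> in <module>\n'
    '----> 1 1 / 0\n'
    '\n'
    'ZeroDivisionError: division by zero\n'
)
# The partial traceback lexer handles the frame/arrow/exception lines; the code
# fragments it tags as Other are re-lexed by the IPython lexer chosen via python3.
print(highlight(tb, IPythonTracebackLexer(python3=True), TerminalFormatter()))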
211 | class IPythonConsoleLexer(Lexer): |
|
211 | class IPythonConsoleLexer(Lexer): | |
212 | """ |
|
212 | """ | |
213 | An IPython console lexer for IPython code-blocks and doctests, such as: |
|
213 | An IPython console lexer for IPython code-blocks and doctests, such as: | |
214 |
|
214 | |||
215 | .. code-block:: rst |
|
215 | .. code-block:: rst | |
216 |
|
216 | |||
217 | .. code-block:: ipythonconsole |
|
217 | .. code-block:: ipythonconsole | |
218 |
|
218 | |||
219 | In [1]: a = 'foo' |
|
219 | In [1]: a = 'foo' | |
220 |
|
220 | |||
221 | In [2]: a |
|
221 | In [2]: a | |
222 | Out[2]: 'foo' |
|
222 | Out[2]: 'foo' | |
223 |
|
223 | |||
224 | In [3]: print a |
|
224 | In [3]: print a | |
225 | foo |
|
225 | foo | |
226 |
|
226 | |||
227 | In [4]: 1 / 0 |
|
227 | In [4]: 1 / 0 | |
228 |
|
228 | |||
229 |
|
229 | |||
230 | Support is also provided for IPython exceptions: |
|
230 | Support is also provided for IPython exceptions: | |
231 |
|
231 | |||
232 | .. code-block:: rst |
|
232 | .. code-block:: rst | |
233 |
|
233 | |||
234 | .. code-block:: ipythonconsole |
|
234 | .. code-block:: ipythonconsole | |
235 |
|
235 | |||
236 | In [1]: raise Exception |
|
236 | In [1]: raise Exception | |
237 |
|
237 | |||
238 | --------------------------------------------------------------------------- |
|
238 | --------------------------------------------------------------------------- | |
239 | Exception Traceback (most recent call last) |
|
239 | Exception Traceback (most recent call last) | |
240 | <ipython-input-1-fca2ab0ca76b> in <module> |
|
240 | <ipython-input-1-fca2ab0ca76b> in <module> | |
241 | ----> 1 raise Exception |
|
241 | ----> 1 raise Exception | |
242 |
|
242 | |||
243 | Exception: |
|
243 | Exception: | |
244 |
|
244 | |||
245 | """ |
|
245 | """ | |
246 | name = 'IPython console session' |
|
246 | name = 'IPython console session' | |
247 | aliases = ['ipythonconsole'] |
|
247 | aliases = ['ipythonconsole'] | |
248 | mimetypes = ['text/x-ipython-console'] |
|
248 | mimetypes = ['text/x-ipython-console'] | |
249 |
|
249 | |||
250 | # The regexps used to determine what is input and what is output. |
|
250 | # The regexps used to determine what is input and what is output. | |
251 | # The default prompts for IPython are: |
|
251 | # The default prompts for IPython are: | |
252 | # |
|
252 | # | |
253 | # in = 'In [#]: ' |
|
253 | # in = 'In [#]: ' | |
254 | # continuation = ' .D.: ' |
|
254 | # continuation = ' .D.: ' | |
255 | # template = 'Out[#]: ' |
|
255 | # template = 'Out[#]: ' | |
256 | # |
|
256 | # | |
257 | # Where '#' is the 'prompt number' or 'execution count', and 'D' |
|
257 | # Where '#' is the 'prompt number' or 'execution count', and 'D' | |
258 | # is a number of dots matching the width of the execution count. |
|
258 | # is a number of dots matching the width of the execution count. | |
259 | # |
|
259 | # | |
260 | in1_regex = r'In \[[0-9]+\]: ' |
|
260 | in1_regex = r'In \[[0-9]+\]: ' | |
261 | in2_regex = r' \.\.+\.: ' |
|
261 | in2_regex = r' \.\.+\.: ' | |
262 | out_regex = r'Out\[[0-9]+\]: ' |
|
262 | out_regex = r'Out\[[0-9]+\]: ' | |
263 |
|
263 | |||
264 | #: The regex to determine when a traceback starts. |
|
264 | #: The regex to determine when a traceback starts. | |
265 | ipytb_start = re.compile(r'^(\^C)?(-+\n)|^( File)(.*)(, line )(\d+\n)') |
|
265 | ipytb_start = re.compile(r'^(\^C)?(-+\n)|^( File)(.*)(, line )(\d+\n)') | |
266 |
|
266 | |||
267 | def __init__(self, **options): |
|
267 | def __init__(self, **options): | |
268 | """Initialize the IPython console lexer. |
|
268 | """Initialize the IPython console lexer. | |
269 |
|
269 | |||
270 | Parameters |
|
270 | Parameters | |
271 | ---------- |
|
271 | ---------- | |
272 | python3 : bool |
|
272 | python3 : bool | |
273 | If `True`, then the console inputs are parsed using a Python 3 |
|
273 | If `True`, then the console inputs are parsed using a Python 3 | |
274 | lexer. Otherwise, they are parsed using a Python 2 lexer. |
|
274 | lexer. Otherwise, they are parsed using a Python 2 lexer. | |
275 | in1_regex : RegexObject |
|
275 | in1_regex : RegexObject | |
276 | The compiled regular expression used to detect the start |
|
276 | The compiled regular expression used to detect the start | |
277 | of inputs. Although the IPython configuration setting may have a |
|
277 | of inputs. Although the IPython configuration setting may have a | |
278 | trailing whitespace, do not include it in the regex. If `None`, |
|
278 | trailing whitespace, do not include it in the regex. If `None`, | |
279 | then the default input prompt is assumed. |
|
279 | then the default input prompt is assumed. | |
280 | in2_regex : RegexObject |
|
280 | in2_regex : RegexObject | |
281 | The compiled regular expression used to detect the continuation |
|
281 | The compiled regular expression used to detect the continuation | |
282 | of inputs. Although the IPython configuration setting may have a |
|
282 | of inputs. Although the IPython configuration setting may have a | |
283 | trailing whitespace, do not include it in the regex. If `None`, |
|
283 | trailing whitespace, do not include it in the regex. If `None`, | |
284 | then the default input prompt is assumed. |
|
284 | then the default input prompt is assumed. | |
285 | out_regex : RegexObject |
|
285 | out_regex : RegexObject | |
286 | The compiled regular expression used to detect outputs. If `None`, |
|
286 | The compiled regular expression used to detect outputs. If `None`, | |
287 | then the default output prompt is assumed. |
|
287 | then the default output prompt is assumed. | |
288 |
|
288 | |||
289 | """ |
|
289 | """ | |
290 | self.python3 = get_bool_opt(options, 'python3', False) |
|
290 | self.python3 = get_bool_opt(options, 'python3', False) | |
291 | if self.python3: |
|
291 | if self.python3: | |
292 | self.aliases = ['ipython3console'] |
|
292 | self.aliases = ['ipython3console'] | |
293 | else: |
|
293 | else: | |
294 | self.aliases = ['ipython2console', 'ipythonconsole'] |
|
294 | self.aliases = ['ipython2console', 'ipythonconsole'] | |
295 |
|
295 | |||
296 | in1_regex = options.get('in1_regex', self.in1_regex) |
|
296 | in1_regex = options.get('in1_regex', self.in1_regex) | |
297 | in2_regex = options.get('in2_regex', self.in2_regex) |
|
297 | in2_regex = options.get('in2_regex', self.in2_regex) | |
298 | out_regex = options.get('out_regex', self.out_regex) |
|
298 | out_regex = options.get('out_regex', self.out_regex) | |
299 |
|
299 | |||
300 | # So that we can work with input and output prompts which have been |
|
300 | # So that we can work with input and output prompts which have been | |
301 | # rstrip'd (possibly by editors) we also need rstrip'd variants. If |
|
301 | # rstrip'd (possibly by editors) we also need rstrip'd variants. If | |
302 | # we do not do this, then such prompts will be tagged as 'output'. |
|
302 | # we do not do this, then such prompts will be tagged as 'output'. | |
303 | # The reason we can't just use the rstrip'd variants instead is because |
|
303 | # The reason we can't just use the rstrip'd variants instead is because | |
304 | # we want any whitespace associated with the prompt to be inserted |
|
304 | # we want any whitespace associated with the prompt to be inserted | |
305 | # with the token. This allows formatted code to be modified so as to hide |
|
305 | # with the token. This allows formatted code to be modified so as to hide | |
306 | # the appearance of prompts, with the whitespace included. One example |
|
306 | # the appearance of prompts, with the whitespace included. One example | |
307 | # use of this is in copybutton.js from the standard lib Python docs. |
|
307 | # use of this is in copybutton.js from the standard lib Python docs. | |
308 | in1_regex_rstrip = in1_regex.rstrip() + '\n' |
|
308 | in1_regex_rstrip = in1_regex.rstrip() + '\n' | |
309 | in2_regex_rstrip = in2_regex.rstrip() + '\n' |
|
309 | in2_regex_rstrip = in2_regex.rstrip() + '\n' | |
310 | out_regex_rstrip = out_regex.rstrip() + '\n' |
|
310 | out_regex_rstrip = out_regex.rstrip() + '\n' | |
311 |
|
311 | |||
312 | # Compile and save them all. |
|
312 | # Compile and save them all. | |
313 | attrs = ['in1_regex', 'in2_regex', 'out_regex', |
|
313 | attrs = ['in1_regex', 'in2_regex', 'out_regex', | |
314 | 'in1_regex_rstrip', 'in2_regex_rstrip', 'out_regex_rstrip'] |
|
314 | 'in1_regex_rstrip', 'in2_regex_rstrip', 'out_regex_rstrip'] | |
315 | for attr in attrs: |
|
315 | for attr in attrs: | |
316 | self.__setattr__(attr, re.compile(locals()[attr])) |
|
316 | self.__setattr__(attr, re.compile(locals()[attr])) | |
317 |
|
317 | |||
318 | Lexer.__init__(self, **options) |
|
318 | Lexer.__init__(self, **options) | |
319 |
|
319 | |||
320 | if self.python3: |
|
320 | if self.python3: | |
321 | pylexer = IPython3Lexer |
|
321 | pylexer = IPython3Lexer | |
322 | tblexer = IPythonTracebackLexer |
|
322 | tblexer = IPythonTracebackLexer | |
323 | else: |
|
323 | else: | |
324 | pylexer = IPythonLexer |
|
324 | pylexer = IPythonLexer | |
325 | tblexer = IPythonTracebackLexer |
|
325 | tblexer = IPythonTracebackLexer | |
326 |
|
326 | |||
327 | self.pylexer = pylexer(**options) |
|
327 | self.pylexer = pylexer(**options) | |
328 | self.tblexer = tblexer(**options) |
|
328 | self.tblexer = tblexer(**options) | |
329 |
|
329 | |||
330 | self.reset() |
|
330 | self.reset() | |
331 |
|
331 | |||
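The options documented in the docstring above are accepted as plain regex strings, and the rstrip'd variants are derived automatically as the __init__ shows. A hedged sketch with made-up prompt patterns (the regex values are illustrative, not IPython defaults):

from IPython.lib.lexers import IPythonConsoleLexer

lexer = IPythonConsoleLexer(
    python3=True,
    in1_regex=r'In<[0-9]+>: ',   # hypothetical first input prompt
    in2_regex=r'   \.\.\.: ',    # hypothetical continuation prompt
    out_regex=r'Out<[0-9]+>: ',  # hypothetical output prompt
)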
332 | def reset(self): |
|
332 | def reset(self): | |
333 | self.mode = 'output' |
|
333 | self.mode = 'output' | |
334 | self.index = 0 |
|
334 | self.index = 0 | |
335 | self.buffer = u'' |
|
335 | self.buffer = u'' | |
336 | self.insertions = [] |
|
336 | self.insertions = [] | |
337 |
|
337 | |||
338 | def buffered_tokens(self): |
|
338 | def buffered_tokens(self): | |
339 | """ |
|
339 | """ | |
340 | Generator of unprocessed tokens after doing insertions and before |
|
340 | Generator of unprocessed tokens after doing insertions and before | |
341 | changing to a new state. |
|
341 | changing to a new state. | |
342 |
|
342 | |||
343 | """ |
|
343 | """ | |
344 | if self.mode == 'output': |
|
344 | if self.mode == 'output': | |
345 | tokens = [(0, Generic.Output, self.buffer)] |
|
345 | tokens = [(0, Generic.Output, self.buffer)] | |
346 | elif self.mode == 'input': |
|
346 | elif self.mode == 'input': | |
347 | tokens = self.pylexer.get_tokens_unprocessed(self.buffer) |
|
347 | tokens = self.pylexer.get_tokens_unprocessed(self.buffer) | |
348 | else: # traceback |
|
348 | else: # traceback | |
349 | tokens = self.tblexer.get_tokens_unprocessed(self.buffer) |
|
349 | tokens = self.tblexer.get_tokens_unprocessed(self.buffer) | |
350 |
|
350 | |||
351 | for i, t, v in do_insertions(self.insertions, tokens): |
|
351 | for i, t, v in do_insertions(self.insertions, tokens): | |
352 | # All token indexes are relative to the buffer. |
|
352 | # All token indexes are relative to the buffer. | |
353 | yield self.index + i, t, v |
|
353 | yield self.index + i, t, v | |
354 |
|
354 | |||
355 | # Clear it all |
|
355 | # Clear it all | |
356 | self.index += len(self.buffer) |
|
356 | self.index += len(self.buffer) | |
357 | self.buffer = u'' |
|
357 | self.buffer = u'' | |
358 | self.insertions = [] |
|
358 | self.insertions = [] | |
359 |
|
359 | |||
360 | def get_mci(self, line): |
|
360 | def get_mci(self, line): | |
361 | """ |
|
361 | """ | |
362 | Parses the line and returns a 3-tuple: (mode, code, insertion). |
|
362 | Parses the line and returns a 3-tuple: (mode, code, insertion). | |
363 |
|
363 | |||
364 | `mode` is the next mode (or state) of the lexer, and is always equal |
|
364 | `mode` is the next mode (or state) of the lexer, and is always equal | |
365 | to 'input', 'output', or 'tb'. |
|
365 | to 'input', 'output', or 'tb'. | |
366 |
|
366 | |||
367 | `code` is a portion of the line that should be added to the buffer |
|
367 | `code` is a portion of the line that should be added to the buffer | |
368 | corresponding to the next mode and eventually lexed by another lexer. |
|
368 | corresponding to the next mode and eventually lexed by another lexer. | |
369 | For example, `code` could be Python code if `mode` were 'input'. |
|
369 | For example, `code` could be Python code if `mode` were 'input'. | |
370 |
|
370 | |||
371 | `insertion` is a 3-tuple (index, token, text) representing an |
|
371 | `insertion` is a 3-tuple (index, token, text) representing an | |
372 | unprocessed "token" that will be inserted into the stream of tokens |
|
372 | unprocessed "token" that will be inserted into the stream of tokens | |
373 | that are created from the buffer once we change modes. This is usually |
|
373 | that are created from the buffer once we change modes. This is usually | |
374 | the input or output prompt. |
|
374 | the input or output prompt. | |
375 |
|
375 | |||
376 | In general, the next mode depends on current mode and on the contents |
|
376 | In general, the next mode depends on current mode and on the contents | |
377 | of `line`. |
|
377 | of `line`. | |
378 |
|
378 | |||
379 | """ |
|
379 | """ | |
380 | # To reduce the number of regex match checks, we have multiple |
|
380 | # To reduce the number of regex match checks, we have multiple | |
381 | # 'if' blocks instead of 'if-elif' blocks. |
|
381 | # 'if' blocks instead of 'if-elif' blocks. | |
382 |
|
382 | |||
383 | # Check for possible end of input |
|
383 | # Check for possible end of input | |
384 | in2_match = self.in2_regex.match(line) |
|
384 | in2_match = self.in2_regex.match(line) | |
385 | in2_match_rstrip = self.in2_regex_rstrip.match(line) |
|
385 | in2_match_rstrip = self.in2_regex_rstrip.match(line) | |
386 | if (in2_match and in2_match.group().rstrip() == line.rstrip()) or \ |
|
386 | if (in2_match and in2_match.group().rstrip() == line.rstrip()) or \ | |
387 | in2_match_rstrip: |
|
387 | in2_match_rstrip: | |
388 | end_input = True |
|
388 | end_input = True | |
389 | else: |
|
389 | else: | |
390 | end_input = False |
|
390 | end_input = False | |
391 | if end_input and self.mode != 'tb': |
|
391 | if end_input and self.mode != 'tb': | |
392 | # Only look for an end of input when not in tb mode. |
|
392 | # Only look for an end of input when not in tb mode. | |
393 | # An ellipsis could appear within the traceback. |
|
393 | # An ellipsis could appear within the traceback. | |
394 | mode = 'output' |
|
394 | mode = 'output' | |
395 | code = u'' |
|
395 | code = u'' | |
396 | insertion = (0, Generic.Prompt, line) |
|
396 | insertion = (0, Generic.Prompt, line) | |
397 | return mode, code, insertion |
|
397 | return mode, code, insertion | |
398 |
|
398 | |||
399 | # Check for output prompt |
|
399 | # Check for output prompt | |
400 | out_match = self.out_regex.match(line) |
|
400 | out_match = self.out_regex.match(line) | |
401 | out_match_rstrip = self.out_regex_rstrip.match(line) |
|
401 | out_match_rstrip = self.out_regex_rstrip.match(line) | |
402 | if out_match or out_match_rstrip: |
|
402 | if out_match or out_match_rstrip: | |
403 | mode = 'output' |
|
403 | mode = 'output' | |
404 | if out_match: |
|
404 | if out_match: | |
405 | idx = out_match.end() |
|
405 | idx = out_match.end() | |
406 | else: |
|
406 | else: | |
407 | idx = out_match_rstrip.end() |
|
407 | idx = out_match_rstrip.end() | |
408 | code = line[idx:] |
|
408 | code = line[idx:] | |
409 | # Use the 'heading' token for output. We cannot use Generic.Error |
|
409 | # Use the 'heading' token for output. We cannot use Generic.Error | |
410 | # since it would conflict with exceptions. |
|
410 | # since it would conflict with exceptions. | |
411 | insertion = (0, Generic.Heading, line[:idx]) |
|
411 | insertion = (0, Generic.Heading, line[:idx]) | |
412 | return mode, code, insertion |
|
412 | return mode, code, insertion | |
413 |
|
413 | |||
414 |
|
414 | |||
415 | # Check for input or continuation prompt (non stripped version) |
|
415 | # Check for input or continuation prompt (non stripped version) | |
416 | in1_match = self.in1_regex.match(line) |
|
416 | in1_match = self.in1_regex.match(line) | |
417 | if in1_match or (in2_match and self.mode != 'tb'): |
|
417 | if in1_match or (in2_match and self.mode != 'tb'): | |
418 | # New input, or continued input when not in tb mode. |
|
418 | # New input, or continued input when not in tb mode. | |
419 | # We do not check for continued input when in tb since it is |
|
419 | # We do not check for continued input when in tb since it is | |
420 | # allowable to replace a long stack with an ellipsis. |
|
420 | # allowable to replace a long stack with an ellipsis. | |
421 | mode = 'input' |
|
421 | mode = 'input' | |
422 | if in1_match: |
|
422 | if in1_match: | |
423 | idx = in1_match.end() |
|
423 | idx = in1_match.end() | |
424 | else: # in2_match |
|
424 | else: # in2_match | |
425 | idx = in2_match.end() |
|
425 | idx = in2_match.end() | |
426 | code = line[idx:] |
|
426 | code = line[idx:] | |
427 | insertion = (0, Generic.Prompt, line[:idx]) |
|
427 | insertion = (0, Generic.Prompt, line[:idx]) | |
428 | return mode, code, insertion |
|
428 | return mode, code, insertion | |
429 |
|
429 | |||
430 | # Check for input or continuation prompt (stripped version) |
|
430 | # Check for input or continuation prompt (stripped version) | |
431 | in1_match_rstrip = self.in1_regex_rstrip.match(line) |
|
431 | in1_match_rstrip = self.in1_regex_rstrip.match(line) | |
432 | if in1_match_rstrip or (in2_match_rstrip and self.mode != 'tb'): |
|
432 | if in1_match_rstrip or (in2_match_rstrip and self.mode != 'tb'): | |
433 | # New input, or continued input when not in tb mode. |
|
433 | # New input, or continued input when not in tb mode. | |
434 | # We do not check for continued input when in tb since it is |
|
434 | # We do not check for continued input when in tb since it is | |
435 | # allowable to replace a long stack with an ellipsis. |
|
435 | # allowable to replace a long stack with an ellipsis. | |
436 | mode = 'input' |
|
436 | mode = 'input' | |
437 | if in1_match_rstrip: |
|
437 | if in1_match_rstrip: | |
438 | idx = in1_match_rstrip.end() |
|
438 | idx = in1_match_rstrip.end() | |
439 | else: # in2_match |
|
439 | else: # in2_match | |
440 | idx = in2_match_rstrip.end() |
|
440 | idx = in2_match_rstrip.end() | |
441 | code = line[idx:] |
|
441 | code = line[idx:] | |
442 | insertion = (0, Generic.Prompt, line[:idx]) |
|
442 | insertion = (0, Generic.Prompt, line[:idx]) | |
443 | return mode, code, insertion |
|
443 | return mode, code, insertion | |
444 |
|
444 | |||
445 | # Check for traceback |
|
445 | # Check for traceback | |
446 | if self.ipytb_start.match(line): |
|
446 | if self.ipytb_start.match(line): | |
447 | mode = 'tb' |
|
447 | mode = 'tb' | |
448 | code = line |
|
448 | code = line | |
449 | insertion = None |
|
449 | insertion = None | |
450 | return mode, code, insertion |
|
450 | return mode, code, insertion | |
451 |
|
451 | |||
452 | # All other stuff... |
|
452 | # All other stuff... | |
453 | if self.mode in ('input', 'output'): |
|
453 | if self.mode in ('input', 'output'): | |
454 | # We assume all other text is output. Multiline input that |
|
454 | # We assume all other text is output. Multiline input that | |
455 | # does not use the continuation marker cannot be detected. |
|
455 | # does not use the continuation marker cannot be detected. | |
456 | # For example, the 3 in the following is clearly output: |
|
456 | # For example, the 3 in the following is clearly output: | |
457 | # |
|
457 | # | |
458 | # In [1]: print 3 |
|
458 | # In [1]: print 3 | |
459 | # 3 |
|
459 | # 3 | |
460 | # |
|
460 | # | |
461 | # But the following second line is part of the input: |
|
461 | # But the following second line is part of the input: | |
462 | # |
|
462 | # | |
463 | # In [2]: while True: |
|
463 | # In [2]: while True: | |
464 | # print True |
|
464 | # print True | |
465 | # |
|
465 | # | |
466 | # In both cases, the 2nd line will be 'output'. |
|
466 | # In both cases, the 2nd line will be 'output'. | |
467 | # |
|
467 | # | |
468 | mode = 'output' |
|
468 | mode = 'output' | |
469 | else: |
|
469 | else: | |
470 | mode = 'tb' |
|
470 | mode = 'tb' | |
471 |
|
471 | |||
472 | code = line |
|
472 | code = line | |
473 | insertion = None |
|
473 | insertion = None | |
474 |
|
474 | |||
475 | return mode, code, insertion |
|
475 | return mode, code, insertion | |
476 |
|
476 | |||
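For a concrete feel of the branches above, this is what get_mci should return for a plain input line (a hedged sketch traced by hand from the code, not captured output; import path assumed):

from IPython.lib.lexers import IPythonConsoleLexer

lx = IPythonConsoleLexer(python3=True)
mode, code, insertion = lx.get_mci('In [1]: x = 1\n')
# Expected per the in1_regex branch: mode == 'input', code == 'x = 1\n',
# insertion == (0, Generic.Prompt, 'In [1]: ')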
477 | def get_tokens_unprocessed(self, text): |
|
477 | def get_tokens_unprocessed(self, text): | |
478 | self.reset() |
|
478 | self.reset() | |
479 | for match in line_re.finditer(text): |
|
479 | for match in line_re.finditer(text): | |
480 | line = match.group() |
|
480 | line = match.group() | |
481 | mode, code, insertion = self.get_mci(line) |
|
481 | mode, code, insertion = self.get_mci(line) | |
482 |
|
482 | |||
483 | if mode != self.mode: |
|
483 | if mode != self.mode: | |
484 | # Yield buffered tokens before transitioning to new mode. |
|
484 | # Yield buffered tokens before transitioning to new mode. | |
485 | for token in self.buffered_tokens(): |
|
485 | for token in self.buffered_tokens(): | |
486 | yield token |
|
486 | yield token | |
487 | self.mode = mode |
|
487 | self.mode = mode | |
488 |
|
488 | |||
489 | if insertion: |
|
489 | if insertion: | |
490 | self.insertions.append((len(self.buffer), [insertion])) |
|
490 | self.insertions.append((len(self.buffer), [insertion])) | |
491 | self.buffer += code |
|
491 | self.buffer += code | |
492 |
|
492 | |||
493 | for token in self.buffered_tokens(): |
|
493 | for token in self.buffered_tokens(): | |
494 | yield token |
|
494 | yield token | |
495 |
|
495 | |||
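Putting the state machine together, a short session can be tokenized as follows (hedged sketch; the exact token stream for the code portion depends on the delegated Python lexer):

from IPython.lib.lexers import IPythonConsoleLexer

session = 'In [1]: 1 + 1\nOut[1]: 2\n'
for index, token, value in IPythonConsoleLexer(python3=True).get_tokens_unprocessed(session):
    print(index, token, repr(value))
# Roughly: 'In [1]: ' as Generic.Prompt, '1 + 1' lexed as Python code,
# 'Out[1]: ' as Generic.Heading and '2' as Generic.Output.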
496 | class IPyLexer(Lexer): |
|
496 | class IPyLexer(Lexer): | |
497 | r""" |
|
497 | r""" | |
498 | Primary lexer for all IPython-like code. |
|
498 | Primary lexer for all IPython-like code. | |
499 |
|
499 | |||
500 | This is a simple helper lexer. If the first line of the text begins with |
|
500 | This is a simple helper lexer. If the first line of the text begins with | |
501 | "In \[[0-9]+\]:", then the entire text is parsed with an IPython console |
|
501 | "In \[[0-9]+\]:", then the entire text is parsed with an IPython console | |
502 | lexer. If not, then the entire text is parsed with an IPython lexer. |
|
502 | lexer. If not, then the entire text is parsed with an IPython lexer. | |
503 |
|
503 | |||
504 | The goal is to reduce the number of lexers that are registered |
|
504 | The goal is to reduce the number of lexers that are registered | |
505 | with Pygments. |
|
505 | with Pygments. | |
506 |
|
506 | |||
507 | """ |
|
507 | """ | |
508 | name = 'IPy session' |
|
508 | name = 'IPy session' | |
509 | aliases = ['ipy'] |
|
509 | aliases = ['ipy'] | |
510 |
|
510 | |||
511 | def __init__(self, **options): |
|
511 | def __init__(self, **options): | |
512 | self.python3 = get_bool_opt(options, 'python3', False) |
|
512 | self.python3 = get_bool_opt(options, 'python3', False) | |
513 | if self.python3: |
|
513 | if self.python3: | |
514 | self.aliases = ['ipy3'] |
|
514 | self.aliases = ['ipy3'] | |
515 | else: |
|
515 | else: | |
516 | self.aliases = ['ipy2', 'ipy'] |
|
516 | self.aliases = ['ipy2', 'ipy'] | |
517 |
|
517 | |||
518 | Lexer.__init__(self, **options) |
|
518 | Lexer.__init__(self, **options) | |
519 |
|
519 | |||
520 | self.IPythonLexer = IPythonLexer(**options) |
|
520 | self.IPythonLexer = IPythonLexer(**options) | |
521 | self.IPythonConsoleLexer = IPythonConsoleLexer(**options) |
|
521 | self.IPythonConsoleLexer = IPythonConsoleLexer(**options) | |
522 |
|
522 | |||
523 | def get_tokens_unprocessed(self, text): |
|
523 | def get_tokens_unprocessed(self, text): | |
524 | # Search for the input prompt anywhere...this allows code blocks to |
|
524 | # Search for the input prompt anywhere...this allows code blocks to | |
525 | # begin with comments as well. |
|
525 | # begin with comments as well. | |
526 | if re.match(r'.*(In \[[0-9]+\]:)', text.strip(), re.DOTALL): |
|
526 | if re.match(r'.*(In \[[0-9]+\]:)', text.strip(), re.DOTALL): | |
527 | lex = self.IPythonConsoleLexer |
|
527 | lex = self.IPythonConsoleLexer | |
528 | else: |
|
528 | else: | |
529 | lex = self.IPythonLexer |
|
529 | lex = self.IPythonLexer | |
530 | for token in lex.get_tokens_unprocessed(text): |
|
530 | for token in lex.get_tokens_unprocessed(text): | |
531 | yield token |
|
531 | yield token | |
532 |
|
532 |
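A hedged sketch of the dispatch described in the IPyLexer docstring above (import path assumed):

from IPython.lib.lexers import IPyLexer

lex = IPyLexer(python3=True)
# Contains an input prompt, so the text is handed to the IPythonConsoleLexer:
console_tokens = list(lex.get_tokens('In [1]: %timeit pass\n'))
# No prompt anywhere, so it is lexed as plain IPython code instead:
script_tokens = list(lex.get_tokens('%timeit pass\n'))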
@@ -1,871 +1,860 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | """ |
|
2 | """ | |
3 | Python advanced pretty printer. This pretty printer is intended to |
|
3 | Python advanced pretty printer. This pretty printer is intended to | |
4 | replace the old `pprint` python module which does not allow developers |
|
4 | replace the old `pprint` python module which does not allow developers | |
5 | to provide their own pretty print callbacks. |
|
5 | to provide their own pretty print callbacks. | |
6 |
|
6 | |||
7 | This module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`. |
|
7 | This module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`. | |
8 |
|
8 | |||
9 |
|
9 | |||
10 | Example Usage |
|
10 | Example Usage | |
11 | ------------- |
|
11 | ------------- | |
12 |
|
12 | |||
13 | To directly print the representation of an object use `pprint`:: |
|
13 | To directly print the representation of an object use `pprint`:: | |
14 |
|
14 | |||
15 | from pretty import pprint |
|
15 | from pretty import pprint | |
16 | pprint(complex_object) |
|
16 | pprint(complex_object) | |
17 |
|
17 | |||
18 | To get a string of the output use `pretty`:: |
|
18 | To get a string of the output use `pretty`:: | |
19 |
|
19 | |||
20 | from pretty import pretty |
|
20 | from pretty import pretty | |
21 | string = pretty(complex_object) |
|
21 | string = pretty(complex_object) | |
22 |
|
22 | |||
23 |
|
23 | |||
24 | Extending |
|
24 | Extending | |
25 | --------- |
|
25 | --------- | |
26 |
|
26 | |||
27 | The pretty library allows developers to add pretty printing rules for their |
|
27 | The pretty library allows developers to add pretty printing rules for their | |
28 | own objects. This process is straightforward. All you have to do is to |
|
28 | own objects. This process is straightforward. All you have to do is to | |
29 | add a `_repr_pretty_` method to your object and call the methods on the |
|
29 | add a `_repr_pretty_` method to your object and call the methods on the | |
30 | pretty printer passed:: |
|
30 | pretty printer passed:: | |
31 |
|
31 | |||
32 | class MyObject(object): |
|
32 | class MyObject(object): | |
33 |
|
33 | |||
34 | def _repr_pretty_(self, p, cycle): |
|
34 | def _repr_pretty_(self, p, cycle): | |
35 | ... |
|
35 | ... | |
36 |
|
36 | |||
37 | Here is an example implementation of a `_repr_pretty_` method for a list |
|
37 | Here is an example implementation of a `_repr_pretty_` method for a list | |
38 | subclass:: |
|
38 | subclass:: | |
39 |
|
39 | |||
40 | class MyList(list): |
|
40 | class MyList(list): | |
41 |
|
41 | |||
42 | def _repr_pretty_(self, p, cycle): |
|
42 | def _repr_pretty_(self, p, cycle): | |
43 | if cycle: |
|
43 | if cycle: | |
44 | p.text('MyList(...)') |
|
44 | p.text('MyList(...)') | |
45 | else: |
|
45 | else: | |
46 | with p.group(8, 'MyList([', '])'): |
|
46 | with p.group(8, 'MyList([', '])'): | |
47 | for idx, item in enumerate(self): |
|
47 | for idx, item in enumerate(self): | |
48 | if idx: |
|
48 | if idx: | |
49 | p.text(',') |
|
49 | p.text(',') | |
50 | p.breakable() |
|
50 | p.breakable() | |
51 | p.pretty(item) |
|
51 | p.pretty(item) | |
52 |
|
52 | |||
53 | The `cycle` parameter is `True` if pretty detected a cycle. You *have* to |
|
53 | The `cycle` parameter is `True` if pretty detected a cycle. You *have* to | |
54 | react to that or the result is an infinite loop. `p.text()` just adds |
|
54 | react to that or the result is an infinite loop. `p.text()` just adds | |
55 | non-breaking text to the output, `p.breakable()` either adds whitespace |
|
55 | non-breaking text to the output, `p.breakable()` either adds whitespace | |
56 | or breaks here. If you pass it an argument it's used instead of the |
|
56 | or breaks here. If you pass it an argument it's used instead of the | |
57 | default space. `p.pretty` prettyprints another object using the pretty print |
|
57 | default space. `p.pretty` prettyprints another object using the pretty print | |
58 | method. |
|
58 | method. | |
59 |
|
59 | |||
60 | The first parameter to the `group` function specifies the extra indentation |
|
60 | The first parameter to the `group` function specifies the extra indentation | |
61 | of the next line. In this example the next item will either be on the same |
|
61 | of the next line. In this example the next item will either be on the same | |
62 | line (if the items are short enough) or aligned with the right edge of the |
|
62 | line (if the items are short enough) or aligned with the right edge of the | |
63 | opening bracket of `MyList`. |
|
63 | opening bracket of `MyList`. | |
64 |
|
64 | |||
65 | If you just want to indent something you can use the group function |
|
65 | If you just want to indent something you can use the group function | |
66 | without open / close parameters. You can also use this code:: |
|
66 | without open / close parameters. You can also use this code:: | |
67 |
|
67 | |||
68 | with p.indent(2): |
|
68 | with p.indent(2): | |
69 | ... |
|
69 | ... | |
70 |
|
70 | |||
71 | Inheritance diagram: |
|
71 | Inheritance diagram: | |
72 |
|
72 | |||
73 | .. inheritance-diagram:: IPython.lib.pretty |
|
73 | .. inheritance-diagram:: IPython.lib.pretty | |
74 | :parts: 3 |
|
74 | :parts: 3 | |
75 |
|
75 | |||
76 | :copyright: 2007 by Armin Ronacher. |
|
76 | :copyright: 2007 by Armin Ronacher. | |
77 | Portions (c) 2009 by Robert Kern. |
|
77 | Portions (c) 2009 by Robert Kern. | |
78 | :license: BSD License. |
|
78 | :license: BSD License. | |
79 | """ |
|
79 | """ | |
80 |
|
80 | |||
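As a runnable follow-up to the MyList example in the docstring above, the sketch below shows the effect of the group indent of 8: once the printed form exceeds max_width, continuation lines are aligned under the right edge of the opening bracket (expected shape hedged, not captured output):

from IPython.lib.pretty import pretty

class MyList(list):
    def _repr_pretty_(self, p, cycle):
        if cycle:
            p.text('MyList(...)')
        else:
            with p.group(8, 'MyList([', '])'):
                for idx, item in enumerate(self):
                    if idx:
                        p.text(',')
                    p.breakable()
                    p.pretty(item)

print(pretty(MyList(range(5)), max_width=16))
# Expected to wrap roughly like this once 16 columns are exceeded:
# MyList([
#         0,
#         1,
#         ...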
81 | from contextlib import contextmanager |
|
81 | from contextlib import contextmanager | |
82 | import datetime |
|
82 | import datetime | |
83 | import os |
|
83 | import os | |
84 | import re |
|
84 | import re | |
85 | import sys |
|
85 | import sys | |
86 | import types |
|
86 | import types | |
87 | from collections import deque |
|
87 | from collections import deque | |
88 | from inspect import signature |
|
88 | from inspect import signature | |
89 | from io import StringIO |
|
89 | from io import StringIO | |
90 | from warnings import warn |
|
90 | from warnings import warn | |
91 |
|
91 | |||
92 | from IPython.utils.decorators import undoc |
|
92 | from IPython.utils.decorators import undoc | |
93 | from IPython.utils.py3compat import PYPY |
|
93 | from IPython.utils.py3compat import PYPY | |
94 |
|
94 | |||
95 | __all__ = ['pretty', 'pprint', 'PrettyPrinter', 'RepresentationPrinter', |
|
95 | __all__ = ['pretty', 'pprint', 'PrettyPrinter', 'RepresentationPrinter', | |
96 | 'for_type', 'for_type_by_name'] |
|
96 | 'for_type', 'for_type_by_name'] | |
97 |
|
97 | |||
98 |
|
98 | |||
99 | MAX_SEQ_LENGTH = 1000 |
|
99 | MAX_SEQ_LENGTH = 1000 | |
100 | _re_pattern_type = type(re.compile('')) |
|
100 | _re_pattern_type = type(re.compile('')) | |
101 |
|
101 | |||
102 | def _safe_getattr(obj, attr, default=None): |
|
102 | def _safe_getattr(obj, attr, default=None): | |
103 | """Safe version of getattr. |
|
103 | """Safe version of getattr. | |
104 |
|
104 | |||
105 | Same as getattr, but will return ``default`` on any Exception, |
|
105 | Same as getattr, but will return ``default`` on any Exception, | |
106 | rather than raising. |
|
106 | rather than raising. | |
107 | """ |
|
107 | """ | |
108 | try: |
|
108 | try: | |
109 | return getattr(obj, attr, default) |
|
109 | return getattr(obj, attr, default) | |
110 | except Exception: |
|
110 | except Exception: | |
111 | return default |
|
111 | return default | |
112 |
|
112 | |||
113 | @undoc |
|
113 | @undoc | |
114 | class CUnicodeIO(StringIO): |
|
114 | class CUnicodeIO(StringIO): | |
115 | def __init__(self, *args, **kwargs): |
|
115 | def __init__(self, *args, **kwargs): | |
116 | super().__init__(*args, **kwargs) |
|
116 | super().__init__(*args, **kwargs) | |
117 | warn(("CUnicodeIO is deprecated since IPython 6.0. " |
|
117 | warn(("CUnicodeIO is deprecated since IPython 6.0. " | |
118 | "Please use io.StringIO instead."), |
|
118 | "Please use io.StringIO instead."), | |
119 | DeprecationWarning, stacklevel=2) |
|
119 | DeprecationWarning, stacklevel=2) | |
120 |
|
120 | |||
121 | def _sorted_for_pprint(items): |
|
121 | def _sorted_for_pprint(items): | |
122 | """ |
|
122 | """ | |
123 | Sort the given items for pretty printing. Since some predictable |
|
123 | Sort the given items for pretty printing. Since some predictable | |
124 | sorting is better than no sorting at all, we sort on the string |
|
124 | sorting is better than no sorting at all, we sort on the string | |
125 | representation if normal sorting fails. |
|
125 | representation if normal sorting fails. | |
126 | """ |
|
126 | """ | |
127 | items = list(items) |
|
127 | items = list(items) | |
128 | try: |
|
128 | try: | |
129 | return sorted(items) |
|
129 | return sorted(items) | |
130 | except Exception: |
|
130 | except Exception: | |
131 | try: |
|
131 | try: | |
132 | return sorted(items, key=str) |
|
132 | return sorted(items, key=str) | |
133 | except Exception: |
|
133 | except Exception: | |
134 | return items |
|
134 | return items | |
135 |
|
135 | |||
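A brief, hand-traced illustration of the fallback described above (this is a module-private helper):

_sorted_for_pprint([3, 1, 2])       # -> [1, 2, 3]
_sorted_for_pprint([1, 'a', None])  # unorderable mix: falls back to key=str -> [1, None, 'a']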
136 | def pretty(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): |
|
136 | def pretty(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): | |
137 | """ |
|
137 | """ | |
138 | Pretty print the object's representation. |
|
138 | Pretty print the object's representation. | |
139 | """ |
|
139 | """ | |
140 | stream = StringIO() |
|
140 | stream = StringIO() | |
141 | printer = RepresentationPrinter(stream, verbose, max_width, newline, max_seq_length=max_seq_length) |
|
141 | printer = RepresentationPrinter(stream, verbose, max_width, newline, max_seq_length=max_seq_length) | |
142 | printer.pretty(obj) |
|
142 | printer.pretty(obj) | |
143 | printer.flush() |
|
143 | printer.flush() | |
144 | return stream.getvalue() |
|
144 | return stream.getvalue() | |
145 |
|
145 | |||
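A hedged usage sketch of the keyword options accepted above (output shapes are indicative; sequence truncation relies on the built-in sequence printers honouring max_seq_length via the _enumerate helper defined further down):

from IPython.lib.pretty import pretty

pretty({'alpha': 1, 'beta': [2, 3]})         # a plain repr-like string
pretty(list(range(40)), max_width=20)        # wraps onto multiple lines at 20 columns
pretty(list(range(2000)), max_seq_length=5)  # long sequences are cut off with '...'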
146 |
|
146 | |||
147 | def pprint(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): |
|
147 | def pprint(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): | |
148 | """ |
|
148 | """ | |
149 | Like `pretty` but print to stdout. |
|
149 | Like `pretty` but print to stdout. | |
150 | """ |
|
150 | """ | |
151 | printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline, max_seq_length=max_seq_length) |
|
151 | printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline, max_seq_length=max_seq_length) | |
152 | printer.pretty(obj) |
|
152 | printer.pretty(obj) | |
153 | printer.flush() |
|
153 | printer.flush() | |
154 | sys.stdout.write(newline) |
|
154 | sys.stdout.write(newline) | |
155 | sys.stdout.flush() |
|
155 | sys.stdout.flush() | |
156 |
|
156 | |||
157 | class _PrettyPrinterBase(object): |
|
157 | class _PrettyPrinterBase(object): | |
158 |
|
158 | |||
159 | @contextmanager |
|
159 | @contextmanager | |
160 | def indent(self, indent): |
|
160 | def indent(self, indent): | |
161 | """with statement support for indenting/dedenting.""" |
|
161 | """with statement support for indenting/dedenting.""" | |
162 | self.indentation += indent |
|
162 | self.indentation += indent | |
163 | try: |
|
163 | try: | |
164 | yield |
|
164 | yield | |
165 | finally: |
|
165 | finally: | |
166 | self.indentation -= indent |
|
166 | self.indentation -= indent | |
167 |
|
167 | |||
168 | @contextmanager |
|
168 | @contextmanager | |
169 | def group(self, indent=0, open='', close=''): |
|
169 | def group(self, indent=0, open='', close=''): | |
170 | """like begin_group / end_group but for the with statement.""" |
|
170 | """like begin_group / end_group but for the with statement.""" | |
171 | self.begin_group(indent, open) |
|
171 | self.begin_group(indent, open) | |
172 | try: |
|
172 | try: | |
173 | yield |
|
173 | yield | |
174 | finally: |
|
174 | finally: | |
175 | self.end_group(indent, close) |
|
175 | self.end_group(indent, close) | |
176 |
|
176 | |||
177 | class PrettyPrinter(_PrettyPrinterBase): |
|
177 | class PrettyPrinter(_PrettyPrinterBase): | |
178 | """ |
|
178 | """ | |
179 | Baseclass for the `RepresentationPrinter` prettyprinter that is used to |
|
179 | Baseclass for the `RepresentationPrinter` prettyprinter that is used to | |
180 | generate pretty reprs of objects. Unlike the `RepresentationPrinter`, |
|
180 | generate pretty reprs of objects. Unlike the `RepresentationPrinter`, | |
181 | this printer knows nothing about the default pprinters or the `_repr_pretty_` |
|
181 | this printer knows nothing about the default pprinters or the `_repr_pretty_` | |
182 | callback method. |
|
182 | callback method. | |
183 | """ |
|
183 | """ | |
184 |
|
184 | |||
185 | def __init__(self, output, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): |
|
185 | def __init__(self, output, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): | |
186 | self.output = output |
|
186 | self.output = output | |
187 | self.max_width = max_width |
|
187 | self.max_width = max_width | |
188 | self.newline = newline |
|
188 | self.newline = newline | |
189 | self.max_seq_length = max_seq_length |
|
189 | self.max_seq_length = max_seq_length | |
190 | self.output_width = 0 |
|
190 | self.output_width = 0 | |
191 | self.buffer_width = 0 |
|
191 | self.buffer_width = 0 | |
192 | self.buffer = deque() |
|
192 | self.buffer = deque() | |
193 |
|
193 | |||
194 | root_group = Group(0) |
|
194 | root_group = Group(0) | |
195 | self.group_stack = [root_group] |
|
195 | self.group_stack = [root_group] | |
196 | self.group_queue = GroupQueue(root_group) |
|
196 | self.group_queue = GroupQueue(root_group) | |
197 | self.indentation = 0 |
|
197 | self.indentation = 0 | |
198 |
|
198 | |||
199 | def _break_one_group(self, group): |
|
199 | def _break_one_group(self, group): | |
200 | while group.breakables: |
|
200 | while group.breakables: | |
201 | x = self.buffer.popleft() |
|
201 | x = self.buffer.popleft() | |
202 | self.output_width = x.output(self.output, self.output_width) |
|
202 | self.output_width = x.output(self.output, self.output_width) | |
203 | self.buffer_width -= x.width |
|
203 | self.buffer_width -= x.width | |
204 | while self.buffer and isinstance(self.buffer[0], Text): |
|
204 | while self.buffer and isinstance(self.buffer[0], Text): | |
205 | x = self.buffer.popleft() |
|
205 | x = self.buffer.popleft() | |
206 | self.output_width = x.output(self.output, self.output_width) |
|
206 | self.output_width = x.output(self.output, self.output_width) | |
207 | self.buffer_width -= x.width |
|
207 | self.buffer_width -= x.width | |
208 |
|
208 | |||
209 | def _break_outer_groups(self): |
|
209 | def _break_outer_groups(self): | |
210 | while self.max_width < self.output_width + self.buffer_width: |
|
210 | while self.max_width < self.output_width + self.buffer_width: | |
211 | group = self.group_queue.deq() |
|
211 | group = self.group_queue.deq() | |
212 | if not group: |
|
212 | if not group: | |
213 | return |
|
213 | return | |
214 | self._break_one_group(group) |
|
214 | self._break_one_group(group) | |
215 |
|
215 | |||
216 | def text(self, obj): |
|
216 | def text(self, obj): | |
217 | """Add literal text to the output.""" |
|
217 | """Add literal text to the output.""" | |
218 | width = len(obj) |
|
218 | width = len(obj) | |
219 | if self.buffer: |
|
219 | if self.buffer: | |
220 | text = self.buffer[-1] |
|
220 | text = self.buffer[-1] | |
221 | if not isinstance(text, Text): |
|
221 | if not isinstance(text, Text): | |
222 | text = Text() |
|
222 | text = Text() | |
223 | self.buffer.append(text) |
|
223 | self.buffer.append(text) | |
224 | text.add(obj, width) |
|
224 | text.add(obj, width) | |
225 | self.buffer_width += width |
|
225 | self.buffer_width += width | |
226 | self._break_outer_groups() |
|
226 | self._break_outer_groups() | |
227 | else: |
|
227 | else: | |
228 | self.output.write(obj) |
|
228 | self.output.write(obj) | |
229 | self.output_width += width |
|
229 | self.output_width += width | |
230 |
|
230 | |||
231 | def breakable(self, sep=' '): |
|
231 | def breakable(self, sep=' '): | |
232 | """ |
|
232 | """ | |
233 | Add a breakable separator to the output. This does not mean that it |
|
233 | Add a breakable separator to the output. This does not mean that it | |
234 | will automatically break here. If no break takes place at this position, |
|
234 | will automatically break here. If no break takes place at this position, | |
235 | the `sep` is inserted, which defaults to one space. |
|
235 | the `sep` is inserted, which defaults to one space. | |
236 | """ |
|
236 | """ | |
237 | width = len(sep) |
|
237 | width = len(sep) | |
238 | group = self.group_stack[-1] |
|
238 | group = self.group_stack[-1] | |
239 | if group.want_break: |
|
239 | if group.want_break: | |
240 | self.flush() |
|
240 | self.flush() | |
241 | self.output.write(self.newline) |
|
241 | self.output.write(self.newline) | |
242 | self.output.write(' ' * self.indentation) |
|
242 | self.output.write(' ' * self.indentation) | |
243 | self.output_width = self.indentation |
|
243 | self.output_width = self.indentation | |
244 | self.buffer_width = 0 |
|
244 | self.buffer_width = 0 | |
245 | else: |
|
245 | else: | |
246 | self.buffer.append(Breakable(sep, width, self)) |
|
246 | self.buffer.append(Breakable(sep, width, self)) | |
247 | self.buffer_width += width |
|
247 | self.buffer_width += width | |
248 | self._break_outer_groups() |
|
248 | self._break_outer_groups() | |
249 |
|
249 | |||
250 | def break_(self): |
|
250 | def break_(self): | |
251 | """ |
|
251 | """ | |
252 | Explicitly insert a newline into the output, maintaining correct indentation. |
|
252 | Explicitly insert a newline into the output, maintaining correct indentation. | |
253 | """ |
|
253 | """ | |
254 | group = self.group_queue.deq() |
|
254 | group = self.group_queue.deq() | |
255 | if group: |
|
255 | if group: | |
256 | self._break_one_group(group) |
|
256 | self._break_one_group(group) | |
257 | self.flush() |
|
257 | self.flush() | |
258 | self.output.write(self.newline) |
|
258 | self.output.write(self.newline) | |
259 | self.output.write(' ' * self.indentation) |
|
259 | self.output.write(' ' * self.indentation) | |
260 | self.output_width = self.indentation |
|
260 | self.output_width = self.indentation | |
261 | self.buffer_width = 0 |
|
261 | self.buffer_width = 0 | |
262 |
|
262 | |||
263 |
|
263 | |||
264 | def begin_group(self, indent=0, open=''): |
|
264 | def begin_group(self, indent=0, open=''): | |
265 | """ |
|
265 | """ | |
266 | Begin a group. If you want support for python < 2.5 which doesn't has |
|
266 | Begin a group. | |
267 | the with statement this is the preferred way: |
|
|||
268 |
|
||||
269 | p.begin_group(1, '{') |
|
|||
270 | ... |
|
|||
271 | p.end_group(1, '}') |
|
|||
272 |
|
||||
273 | The python 2.5 expression would be this: |
|
|||
274 |
|
||||
275 | with p.group(1, '{', '}'): |
|
|||
276 | ... |
|
|||
277 |
|
||||
278 | The first parameter specifies the indentation for the next line (usually |
|
267 | The first parameter specifies the indentation for the next line (usually | |
279 | the width of the opening text), the second the opening text. All |
|
268 | the width of the opening text), the second the opening text. All | |
280 | parameters are optional. |
|
269 | parameters are optional. | |
281 | """ |
|
270 | """ | |
282 | if open: |
|
271 | if open: | |
283 | self.text(open) |
|
272 | self.text(open) | |
284 | group = Group(self.group_stack[-1].depth + 1) |
|
273 | group = Group(self.group_stack[-1].depth + 1) | |
285 | self.group_stack.append(group) |
|
274 | self.group_stack.append(group) | |
286 | self.group_queue.enq(group) |
|
275 | self.group_queue.enq(group) | |
287 | self.indentation += indent |
|
276 | self.indentation += indent | |
288 |
|
277 | |||
289 | def _enumerate(self, seq): |
|
278 | def _enumerate(self, seq): | |
290 | """like enumerate, but with an upper limit on the number of items""" |
|
279 | """like enumerate, but with an upper limit on the number of items""" | |
291 | for idx, x in enumerate(seq): |
|
280 | for idx, x in enumerate(seq): | |
292 | if self.max_seq_length and idx >= self.max_seq_length: |
|
281 | if self.max_seq_length and idx >= self.max_seq_length: | |
293 | self.text(',') |
|
282 | self.text(',') | |
294 | self.breakable() |
|
283 | self.breakable() | |
295 | self.text('...') |
|
284 | self.text('...') | |
296 | return |
|
285 | return | |
297 | yield idx, x |
|
286 | yield idx, x | |
298 |
|
287 | |||
299 | def end_group(self, dedent=0, close=''): |
|
288 | def end_group(self, dedent=0, close=''): | |
300 | """End a group. See `begin_group` for more details.""" |
|
289 | """End a group. See `begin_group` for more details.""" | |
301 | self.indentation -= dedent |
|
290 | self.indentation -= dedent | |
302 | group = self.group_stack.pop() |
|
291 | group = self.group_stack.pop() | |
303 | if not group.breakables: |
|
292 | if not group.breakables: | |
304 | self.group_queue.remove(group) |
|
293 | self.group_queue.remove(group) | |
305 | if close: |
|
294 | if close: | |
306 | self.text(close) |
|
295 | self.text(close) | |
307 |
|
296 | |||
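begin_group/end_group can also be called directly when the with-statement form shown in the module docstring is inconvenient; the pair below is a hedged, runnable sketch equivalent to `with p.group(4, 'Box(', ')')`:

from io import StringIO
from IPython.lib.pretty import PrettyPrinter

out = StringIO()
p = PrettyPrinter(out, max_width=20)
p.begin_group(4, 'Box(')
for i, word in enumerate(['alpha', 'beta', 'gamma']):
    if i:
        p.text(',')
        p.breakable()
    p.text(word)
p.end_group(4, ')')
p.flush()
print(out.getvalue())
# 'Box(alpha, beta, gamma)' is wider than 20 columns, so the breakables break
# and continuation lines are indented by the 4 columns passed to begin_group.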
308 | def flush(self): |
|
297 | def flush(self): | |
309 | """Flush data that is left in the buffer.""" |
|
298 | """Flush data that is left in the buffer.""" | |
310 | for data in self.buffer: |
|
299 | for data in self.buffer: | |
311 | self.output_width += data.output(self.output, self.output_width) |
|
300 | self.output_width += data.output(self.output, self.output_width) | |
312 | self.buffer.clear() |
|
301 | self.buffer.clear() | |
313 | self.buffer_width = 0 |
|
302 | self.buffer_width = 0 | |
314 |
|
303 | |||
315 |
|
304 | |||
316 | def _get_mro(obj_class): |
|
305 | def _get_mro(obj_class): | |
317 | """ Get a reasonable method resolution order of a class and its superclasses |
|
306 | """ Get a reasonable method resolution order of a class and its superclasses | |
318 | for both old-style and new-style classes. |
|
307 | for both old-style and new-style classes. | |
319 | """ |
|
308 | """ | |
320 | if not hasattr(obj_class, '__mro__'): |
|
309 | if not hasattr(obj_class, '__mro__'): | |
321 | # Old-style class. Mix in object to make a fake new-style class. |
|
310 | # Old-style class. Mix in object to make a fake new-style class. | |
322 | try: |
|
311 | try: | |
323 | obj_class = type(obj_class.__name__, (obj_class, object), {}) |
|
312 | obj_class = type(obj_class.__name__, (obj_class, object), {}) | |
324 | except TypeError: |
|
313 | except TypeError: | |
325 | # Old-style extension type that does not descend from object. |
|
314 | # Old-style extension type that does not descend from object. | |
326 | # FIXME: try to construct a more thorough MRO. |
|
315 | # FIXME: try to construct a more thorough MRO. | |
327 | mro = [obj_class] |
|
316 | mro = [obj_class] | |
328 | else: |
|
317 | else: | |
329 | mro = obj_class.__mro__[1:-1] |
|
318 | mro = obj_class.__mro__[1:-1] | |
330 | else: |
|
319 | else: | |
331 | mro = obj_class.__mro__ |
|
320 | mro = obj_class.__mro__ | |
332 | return mro |
|
321 | return mro | |
333 |
|
322 | |||
334 |
|
323 | |||
335 | class RepresentationPrinter(PrettyPrinter): |
|
324 | class RepresentationPrinter(PrettyPrinter): | |
336 | """ |
|
325 | """ | |
337 | Special pretty printer that has a `pretty` method that calls the pretty |
|
326 | Special pretty printer that has a `pretty` method that calls the pretty | |
338 | printer for a python object. |
|
327 | printer for a python object. | |
339 |
|
328 | |||
340 | This class stores processing data on `self` so you must *never* use |
|
329 | This class stores processing data on `self` so you must *never* use | |
341 | this class in a threaded environment. Always lock it or reinstantiate |
|
330 | this class in a threaded environment. Always lock it or reinstantiate | |
342 | it. |
|
331 | it. | |
343 |
|
332 | |||
344 | Instances also have a verbose flag that callbacks can access to control their |
|
333 | Instances also have a verbose flag that callbacks can access to control their | |
345 | output. For example the default instance repr prints all attributes and |
|
334 | output. For example the default instance repr prints all attributes and | |
346 | methods that are not prefixed by an underscore if the printer is in |
|
335 | methods that are not prefixed by an underscore if the printer is in | |
347 | verbose mode. |
|
336 | verbose mode. | |
348 | """ |
|
337 | """ | |
349 |
|
338 | |||
350 | def __init__(self, output, verbose=False, max_width=79, newline='\n', |
|
339 | def __init__(self, output, verbose=False, max_width=79, newline='\n', | |
351 | singleton_pprinters=None, type_pprinters=None, deferred_pprinters=None, |
|
340 | singleton_pprinters=None, type_pprinters=None, deferred_pprinters=None, | |
352 | max_seq_length=MAX_SEQ_LENGTH): |
|
341 | max_seq_length=MAX_SEQ_LENGTH): | |
353 |
|
342 | |||
354 | PrettyPrinter.__init__(self, output, max_width, newline, max_seq_length=max_seq_length) |
|
343 | PrettyPrinter.__init__(self, output, max_width, newline, max_seq_length=max_seq_length) | |
355 | self.verbose = verbose |
|
344 | self.verbose = verbose | |
356 | self.stack = [] |
|
345 | self.stack = [] | |
357 | if singleton_pprinters is None: |
|
346 | if singleton_pprinters is None: | |
358 | singleton_pprinters = _singleton_pprinters.copy() |
|
347 | singleton_pprinters = _singleton_pprinters.copy() | |
359 | self.singleton_pprinters = singleton_pprinters |
|
348 | self.singleton_pprinters = singleton_pprinters | |
360 | if type_pprinters is None: |
|
349 | if type_pprinters is None: | |
361 | type_pprinters = _type_pprinters.copy() |
|
350 | type_pprinters = _type_pprinters.copy() | |
362 | self.type_pprinters = type_pprinters |
|
351 | self.type_pprinters = type_pprinters | |
363 | if deferred_pprinters is None: |
|
352 | if deferred_pprinters is None: | |
364 | deferred_pprinters = _deferred_type_pprinters.copy() |
|
353 | deferred_pprinters = _deferred_type_pprinters.copy() | |
365 | self.deferred_pprinters = deferred_pprinters |
|
354 | self.deferred_pprinters = deferred_pprinters | |
366 |
|
355 | |||
367 | def pretty(self, obj): |
|
356 | def pretty(self, obj): | |
368 | """Pretty print the given object.""" |
|
357 | """Pretty print the given object.""" | |
369 | obj_id = id(obj) |
|
358 | obj_id = id(obj) | |
370 | cycle = obj_id in self.stack |
|
359 | cycle = obj_id in self.stack | |
371 | self.stack.append(obj_id) |
|
360 | self.stack.append(obj_id) | |
372 | self.begin_group() |
|
361 | self.begin_group() | |
373 | try: |
|
362 | try: | |
374 | obj_class = _safe_getattr(obj, '__class__', None) or type(obj) |
|
363 | obj_class = _safe_getattr(obj, '__class__', None) or type(obj) | |
375 | # First try to find registered singleton printers for the type. |
|
364 | # First try to find registered singleton printers for the type. | |
376 | try: |
|
365 | try: | |
377 | printer = self.singleton_pprinters[obj_id] |
|
366 | printer = self.singleton_pprinters[obj_id] | |
378 | except (TypeError, KeyError): |
|
367 | except (TypeError, KeyError): | |
379 | pass |
|
368 | pass | |
380 | else: |
|
369 | else: | |
381 | return printer(obj, self, cycle) |
|
370 | return printer(obj, self, cycle) | |
382 | # Next walk the mro and check for either: |
|
371 | # Next walk the mro and check for either: | |
383 | # 1) a registered printer |
|
372 | # 1) a registered printer | |
384 | # 2) a _repr_pretty_ method |
|
373 | # 2) a _repr_pretty_ method | |
385 | for cls in _get_mro(obj_class): |
|
374 | for cls in _get_mro(obj_class): | |
386 | if cls in self.type_pprinters: |
|
375 | if cls in self.type_pprinters: | |
387 | # printer registered in self.type_pprinters |
|
376 | # printer registered in self.type_pprinters | |
388 | return self.type_pprinters[cls](obj, self, cycle) |
|
377 | return self.type_pprinters[cls](obj, self, cycle) | |
389 | else: |
|
378 | else: | |
390 | # deferred printer |
|
379 | # deferred printer | |
391 | printer = self._in_deferred_types(cls) |
|
380 | printer = self._in_deferred_types(cls) | |
392 | if printer is not None: |
|
381 | if printer is not None: | |
393 | return printer(obj, self, cycle) |
|
382 | return printer(obj, self, cycle) | |
394 | else: |
|
383 | else: | |
395 | # Finally look for special method names. |
|
384 | # Finally look for special method names. | |
396 | # Some objects automatically create any requested |
|
385 | # Some objects automatically create any requested | |
397 | # attribute. Try to ignore most of them by checking for |
|
386 | # attribute. Try to ignore most of them by checking for | |
398 | # callability. |
|
387 | # callability. | |
399 | if '_repr_pretty_' in cls.__dict__: |
|
388 | if '_repr_pretty_' in cls.__dict__: | |
400 | meth = cls._repr_pretty_ |
|
389 | meth = cls._repr_pretty_ | |
401 | if callable(meth): |
|
390 | if callable(meth): | |
402 | return meth(obj, self, cycle) |
|
391 | return meth(obj, self, cycle) | |
403 | if cls is not object \ |
|
392 | if cls is not object \ | |
404 | and callable(cls.__dict__.get('__repr__')): |
|
393 | and callable(cls.__dict__.get('__repr__')): | |
405 | return _repr_pprint(obj, self, cycle) |
|
394 | return _repr_pprint(obj, self, cycle) | |
406 |
|
395 | |||
407 | return _default_pprint(obj, self, cycle) |
|
396 | return _default_pprint(obj, self, cycle) | |
408 | finally: |
|
397 | finally: | |
409 | self.end_group() |
|
398 | self.end_group() | |
410 | self.stack.pop() |
|
399 | self.stack.pop() | |
411 |
|
400 | |||
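Reviewer note (not part of the diff): the lookup above ends by calling a class's own `_repr_pretty_` hook when one is defined in its `__dict__`. A minimal sketch of that hook, using an invented `Interval` class and only the printer methods defined in this file (`text`, `breakable`, `pretty`, and the `group` context manager):

    class Interval(object):
        def __init__(self, lo, hi):
            self.lo, self.hi = lo, hi

        def _repr_pretty_(self, p, cycle):
            # cycle is True when this object is already on the printer's
            # stack, i.e. the structure refers back to itself.
            if cycle:
                p.text('Interval(...)')
                return
            with p.group(9, 'Interval(', ')'):
                p.pretty(self.lo)
                p.text(',')
                p.breakable()
                p.pretty(self.hi)

With `from IPython.lib.pretty import pprint`, `pprint(Interval(1, 2))` prints `Interval(1, 2)`, and the group breaks after the comma only when the output would exceed `max_width`.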
412 | def _in_deferred_types(self, cls): |
|
401 | def _in_deferred_types(self, cls): | |
413 | """ |
|
402 | """ | |
414 | Check if the given class is specified in the deferred type registry. |
|
403 | Check if the given class is specified in the deferred type registry. | |
415 |
|
404 | |||
416 | Returns the printer from the registry if it exists, and None if the |
|
405 | Returns the printer from the registry if it exists, and None if the | |
417 | class is not in the registry. Successful matches will be moved to the |
|
406 | class is not in the registry. Successful matches will be moved to the | |
418 | regular type registry for future use. |
|
407 | regular type registry for future use. | |
419 | """ |
|
408 | """ | |
420 | mod = _safe_getattr(cls, '__module__', None) |
|
409 | mod = _safe_getattr(cls, '__module__', None) | |
421 | name = _safe_getattr(cls, '__name__', None) |
|
410 | name = _safe_getattr(cls, '__name__', None) | |
422 | key = (mod, name) |
|
411 | key = (mod, name) | |
423 | printer = None |
|
412 | printer = None | |
424 | if key in self.deferred_pprinters: |
|
413 | if key in self.deferred_pprinters: | |
425 | # Move the printer over to the regular registry. |
|
414 | # Move the printer over to the regular registry. | |
426 | printer = self.deferred_pprinters.pop(key) |
|
415 | printer = self.deferred_pprinters.pop(key) | |
427 | self.type_pprinters[cls] = printer |
|
416 | self.type_pprinters[cls] = printer | |
428 | return printer |
|
417 | return printer | |
429 |
|
418 | |||
430 |
|
419 | |||
431 | class Printable(object): |
|
420 | class Printable(object): | |
432 |
|
421 | |||
433 | def output(self, stream, output_width): |
|
422 | def output(self, stream, output_width): | |
434 | return output_width |
|
423 | return output_width | |
435 |
|
424 | |||
436 |
|
425 | |||
437 | class Text(Printable): |
|
426 | class Text(Printable): | |
438 |
|
427 | |||
439 | def __init__(self): |
|
428 | def __init__(self): | |
440 | self.objs = [] |
|
429 | self.objs = [] | |
441 | self.width = 0 |
|
430 | self.width = 0 | |
442 |
|
431 | |||
443 | def output(self, stream, output_width): |
|
432 | def output(self, stream, output_width): | |
444 | for obj in self.objs: |
|
433 | for obj in self.objs: | |
445 | stream.write(obj) |
|
434 | stream.write(obj) | |
446 | return output_width + self.width |
|
435 | return output_width + self.width | |
447 |
|
436 | |||
448 | def add(self, obj, width): |
|
437 | def add(self, obj, width): | |
449 | self.objs.append(obj) |
|
438 | self.objs.append(obj) | |
450 | self.width += width |
|
439 | self.width += width | |
451 |
|
440 | |||
452 |
|
441 | |||
453 | class Breakable(Printable): |
|
442 | class Breakable(Printable): | |
454 |
|
443 | |||
455 | def __init__(self, seq, width, pretty): |
|
444 | def __init__(self, seq, width, pretty): | |
456 | self.obj = seq |
|
445 | self.obj = seq | |
457 | self.width = width |
|
446 | self.width = width | |
458 | self.pretty = pretty |
|
447 | self.pretty = pretty | |
459 | self.indentation = pretty.indentation |
|
448 | self.indentation = pretty.indentation | |
460 | self.group = pretty.group_stack[-1] |
|
449 | self.group = pretty.group_stack[-1] | |
461 | self.group.breakables.append(self) |
|
450 | self.group.breakables.append(self) | |
462 |
|
451 | |||
463 | def output(self, stream, output_width): |
|
452 | def output(self, stream, output_width): | |
464 | self.group.breakables.popleft() |
|
453 | self.group.breakables.popleft() | |
465 | if self.group.want_break: |
|
454 | if self.group.want_break: | |
466 | stream.write(self.pretty.newline) |
|
455 | stream.write(self.pretty.newline) | |
467 | stream.write(' ' * self.indentation) |
|
456 | stream.write(' ' * self.indentation) | |
468 | return self.indentation |
|
457 | return self.indentation | |
469 | if not self.group.breakables: |
|
458 | if not self.group.breakables: | |
470 | self.pretty.group_queue.remove(self.group) |
|
459 | self.pretty.group_queue.remove(self.group) | |
471 | stream.write(self.obj) |
|
460 | stream.write(self.obj) | |
472 | return output_width + self.width |
|
461 | return output_width + self.width | |
473 |
|
462 | |||
474 |
|
463 | |||
475 | class Group(Printable): |
|
464 | class Group(Printable): | |
476 |
|
465 | |||
477 | def __init__(self, depth): |
|
466 | def __init__(self, depth): | |
478 | self.depth = depth |
|
467 | self.depth = depth | |
479 | self.breakables = deque() |
|
468 | self.breakables = deque() | |
480 | self.want_break = False |
|
469 | self.want_break = False | |
481 |
|
470 | |||
482 |
|
471 | |||
483 | class GroupQueue(object): |
|
472 | class GroupQueue(object): | |
484 |
|
473 | |||
485 | def __init__(self, *groups): |
|
474 | def __init__(self, *groups): | |
486 | self.queue = [] |
|
475 | self.queue = [] | |
487 | for group in groups: |
|
476 | for group in groups: | |
488 | self.enq(group) |
|
477 | self.enq(group) | |
489 |
|
478 | |||
490 | def enq(self, group): |
|
479 | def enq(self, group): | |
491 | depth = group.depth |
|
480 | depth = group.depth | |
492 | while depth > len(self.queue) - 1: |
|
481 | while depth > len(self.queue) - 1: | |
493 | self.queue.append([]) |
|
482 | self.queue.append([]) | |
494 | self.queue[depth].append(group) |
|
483 | self.queue[depth].append(group) | |
495 |
|
484 | |||
496 | def deq(self): |
|
485 | def deq(self): | |
497 | for stack in self.queue: |
|
486 | for stack in self.queue: | |
498 | for idx, group in enumerate(reversed(stack)): |
|
487 | for idx, group in enumerate(reversed(stack)): | |
499 | if group.breakables: |
|
488 | if group.breakables: | |
500 | del stack[idx] |
|
489 | del stack[idx] | |
501 | group.want_break = True |
|
490 | group.want_break = True | |
502 | return group |
|
491 | return group | |
503 | for group in stack: |
|
492 | for group in stack: | |
504 | group.want_break = True |
|
493 | group.want_break = True | |
505 | del stack[:] |
|
494 | del stack[:] | |
506 |
|
495 | |||
507 | def remove(self, group): |
|
496 | def remove(self, group): | |
508 | try: |
|
497 | try: | |
509 | self.queue[group.depth].remove(group) |
|
498 | self.queue[group.depth].remove(group) | |
510 | except ValueError: |
|
499 | except ValueError: | |
511 | pass |
|
500 | pass | |
512 |
|
501 | |||
513 |
|
502 | |||
514 | def _default_pprint(obj, p, cycle): |
|
503 | def _default_pprint(obj, p, cycle): | |
515 | """ |
|
504 | """ | |
516 | The default print function. Used if an object does not provide one and |
|
505 | The default print function. Used if an object does not provide one and | |
517 | it's none of the builtin objects. |
|
506 | it's none of the builtin objects. | |
518 | """ |
|
507 | """ | |
519 | klass = _safe_getattr(obj, '__class__', None) or type(obj) |
|
508 | klass = _safe_getattr(obj, '__class__', None) or type(obj) | |
520 | if _safe_getattr(klass, '__repr__', None) is not object.__repr__: |
|
509 | if _safe_getattr(klass, '__repr__', None) is not object.__repr__: | |
521 | # A user-provided repr. Find newlines and replace them with p.break_() |
|
510 | # A user-provided repr. Find newlines and replace them with p.break_() | |
522 | _repr_pprint(obj, p, cycle) |
|
511 | _repr_pprint(obj, p, cycle) | |
523 | return |
|
512 | return | |
524 | p.begin_group(1, '<') |
|
513 | p.begin_group(1, '<') | |
525 | p.pretty(klass) |
|
514 | p.pretty(klass) | |
526 | p.text(' at 0x%x' % id(obj)) |
|
515 | p.text(' at 0x%x' % id(obj)) | |
527 | if cycle: |
|
516 | if cycle: | |
528 | p.text(' ...') |
|
517 | p.text(' ...') | |
529 | elif p.verbose: |
|
518 | elif p.verbose: | |
530 | first = True |
|
519 | first = True | |
531 | for key in dir(obj): |
|
520 | for key in dir(obj): | |
532 | if not key.startswith('_'): |
|
521 | if not key.startswith('_'): | |
533 | try: |
|
522 | try: | |
534 | value = getattr(obj, key) |
|
523 | value = getattr(obj, key) | |
535 | except AttributeError: |
|
524 | except AttributeError: | |
536 | continue |
|
525 | continue | |
537 | if isinstance(value, types.MethodType): |
|
526 | if isinstance(value, types.MethodType): | |
538 | continue |
|
527 | continue | |
539 | if not first: |
|
528 | if not first: | |
540 | p.text(',') |
|
529 | p.text(',') | |
541 | p.breakable() |
|
530 | p.breakable() | |
542 | p.text(key) |
|
531 | p.text(key) | |
543 | p.text('=') |
|
532 | p.text('=') | |
544 | step = len(key) + 1 |
|
533 | step = len(key) + 1 | |
545 | p.indentation += step |
|
534 | p.indentation += step | |
546 | p.pretty(value) |
|
535 | p.pretty(value) | |
547 | p.indentation -= step |
|
536 | p.indentation -= step | |
548 | first = False |
|
537 | first = False | |
549 | p.end_group(1, '>') |
|
538 | p.end_group(1, '>') | |
550 |
|
539 | |||
551 |
|
540 | |||
552 | def _seq_pprinter_factory(start, end): |
|
541 | def _seq_pprinter_factory(start, end): | |
553 | """ |
|
542 | """ | |
554 | Factory that returns a pprint function useful for sequences. Used by |
|
543 | Factory that returns a pprint function useful for sequences. Used by | |
555 | the default pprint for tuples, dicts, and lists. |
|
544 | the default pprint for tuples, dicts, and lists. | |
556 | """ |
|
545 | """ | |
557 | def inner(obj, p, cycle): |
|
546 | def inner(obj, p, cycle): | |
558 | if cycle: |
|
547 | if cycle: | |
559 | return p.text(start + '...' + end) |
|
548 | return p.text(start + '...' + end) | |
560 | step = len(start) |
|
549 | step = len(start) | |
561 | p.begin_group(step, start) |
|
550 | p.begin_group(step, start) | |
562 | for idx, x in p._enumerate(obj): |
|
551 | for idx, x in p._enumerate(obj): | |
563 | if idx: |
|
552 | if idx: | |
564 | p.text(',') |
|
553 | p.text(',') | |
565 | p.breakable() |
|
554 | p.breakable() | |
566 | p.pretty(x) |
|
555 | p.pretty(x) | |
567 | if len(obj) == 1 and type(obj) is tuple: |
|
556 | if len(obj) == 1 and type(obj) is tuple: | |
568 | # Special case for 1-item tuples. |
|
557 | # Special case for 1-item tuples. | |
569 | p.text(',') |
|
558 | p.text(',') | |
570 | p.end_group(step, end) |
|
559 | p.end_group(step, end) | |
571 | return inner |
|
560 | return inner | |
572 |
|
561 | |||
573 |
|
562 | |||
574 | def _set_pprinter_factory(start, end): |
|
563 | def _set_pprinter_factory(start, end): | |
575 | """ |
|
564 | """ | |
576 | Factory that returns a pprint function useful for sets and frozensets. |
|
565 | Factory that returns a pprint function useful for sets and frozensets. | |
577 | """ |
|
566 | """ | |
578 | def inner(obj, p, cycle): |
|
567 | def inner(obj, p, cycle): | |
579 | if cycle: |
|
568 | if cycle: | |
580 | return p.text(start + '...' + end) |
|
569 | return p.text(start + '...' + end) | |
581 | if len(obj) == 0: |
|
570 | if len(obj) == 0: | |
582 | # Special case. |
|
571 | # Special case. | |
583 | p.text(type(obj).__name__ + '()') |
|
572 | p.text(type(obj).__name__ + '()') | |
584 | else: |
|
573 | else: | |
585 | step = len(start) |
|
574 | step = len(start) | |
586 | p.begin_group(step, start) |
|
575 | p.begin_group(step, start) | |
587 | # Like dictionary keys, we will try to sort the items if there aren't too many |
|
576 | # Like dictionary keys, we will try to sort the items if there aren't too many | |
588 | if not (p.max_seq_length and len(obj) >= p.max_seq_length): |
|
577 | if not (p.max_seq_length and len(obj) >= p.max_seq_length): | |
589 | items = _sorted_for_pprint(obj) |
|
578 | items = _sorted_for_pprint(obj) | |
590 | else: |
|
579 | else: | |
591 | items = obj |
|
580 | items = obj | |
592 | for idx, x in p._enumerate(items): |
|
581 | for idx, x in p._enumerate(items): | |
593 | if idx: |
|
582 | if idx: | |
594 | p.text(',') |
|
583 | p.text(',') | |
595 | p.breakable() |
|
584 | p.breakable() | |
596 | p.pretty(x) |
|
585 | p.pretty(x) | |
597 | p.end_group(step, end) |
|
586 | p.end_group(step, end) | |
598 | return inner |
|
587 | return inner | |
599 |
|
588 | |||
600 |
|
589 | |||
601 | def _dict_pprinter_factory(start, end): |
|
590 | def _dict_pprinter_factory(start, end): | |
602 | """ |
|
591 | """ | |
603 | Factory that returns a pprint function used by the default pprint of |
|
592 | Factory that returns a pprint function used by the default pprint of | |
604 | dicts and dict proxies. |
|
593 | dicts and dict proxies. | |
605 | """ |
|
594 | """ | |
606 | def inner(obj, p, cycle): |
|
595 | def inner(obj, p, cycle): | |
607 | if cycle: |
|
596 | if cycle: | |
608 | return p.text('{...}') |
|
597 | return p.text('{...}') | |
609 | step = len(start) |
|
598 | step = len(start) | |
610 | p.begin_group(step, start) |
|
599 | p.begin_group(step, start) | |
611 | keys = obj.keys() |
|
600 | keys = obj.keys() | |
612 | for idx, key in p._enumerate(keys): |
|
601 | for idx, key in p._enumerate(keys): | |
613 | if idx: |
|
602 | if idx: | |
614 | p.text(',') |
|
603 | p.text(',') | |
615 | p.breakable() |
|
604 | p.breakable() | |
616 | p.pretty(key) |
|
605 | p.pretty(key) | |
617 | p.text(': ') |
|
606 | p.text(': ') | |
618 | p.pretty(obj[key]) |
|
607 | p.pretty(obj[key]) | |
619 | p.end_group(step, end) |
|
608 | p.end_group(step, end) | |
620 | return inner |
|
609 | return inner | |
621 |
|
610 | |||
622 |
|
611 | |||
623 | def _super_pprint(obj, p, cycle): |
|
612 | def _super_pprint(obj, p, cycle): | |
624 | """The pprint for the super type.""" |
|
613 | """The pprint for the super type.""" | |
625 | p.begin_group(8, '<super: ') |
|
614 | p.begin_group(8, '<super: ') | |
626 | p.pretty(obj.__thisclass__) |
|
615 | p.pretty(obj.__thisclass__) | |
627 | p.text(',') |
|
616 | p.text(',') | |
628 | p.breakable() |
|
617 | p.breakable() | |
629 | if PYPY: # In PyPy, super() objects don't have __self__ attributes |
|
618 | if PYPY: # In PyPy, super() objects don't have __self__ attributes | |
630 | dself = obj.__repr__.__self__ |
|
619 | dself = obj.__repr__.__self__ | |
631 | p.pretty(None if dself is obj else dself) |
|
620 | p.pretty(None if dself is obj else dself) | |
632 | else: |
|
621 | else: | |
633 | p.pretty(obj.__self__) |
|
622 | p.pretty(obj.__self__) | |
634 | p.end_group(8, '>') |
|
623 | p.end_group(8, '>') | |
635 |
|
624 | |||
636 |
|
625 | |||
637 | def _re_pattern_pprint(obj, p, cycle): |
|
626 | def _re_pattern_pprint(obj, p, cycle): | |
638 | """The pprint function for regular expression patterns.""" |
|
627 | """The pprint function for regular expression patterns.""" | |
639 | p.text('re.compile(') |
|
628 | p.text('re.compile(') | |
640 | pattern = repr(obj.pattern) |
|
629 | pattern = repr(obj.pattern) | |
641 | if pattern[:1] in 'uU': |
|
630 | if pattern[:1] in 'uU': | |
642 | pattern = pattern[1:] |
|
631 | pattern = pattern[1:] | |
643 | prefix = 'ur' |
|
632 | prefix = 'ur' | |
644 | else: |
|
633 | else: | |
645 | prefix = 'r' |
|
634 | prefix = 'r' | |
646 | pattern = prefix + pattern.replace('\\\\', '\\') |
|
635 | pattern = prefix + pattern.replace('\\\\', '\\') | |
647 | p.text(pattern) |
|
636 | p.text(pattern) | |
648 | if obj.flags: |
|
637 | if obj.flags: | |
649 | p.text(',') |
|
638 | p.text(',') | |
650 | p.breakable() |
|
639 | p.breakable() | |
651 | done_one = False |
|
640 | done_one = False | |
652 | for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL', |
|
641 | for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL', | |
653 | 'UNICODE', 'VERBOSE', 'DEBUG'): |
|
642 | 'UNICODE', 'VERBOSE', 'DEBUG'): | |
654 | if obj.flags & getattr(re, flag): |
|
643 | if obj.flags & getattr(re, flag): | |
655 | if done_one: |
|
644 | if done_one: | |
656 | p.text('|') |
|
645 | p.text('|') | |
657 | p.text('re.' + flag) |
|
646 | p.text('re.' + flag) | |
658 | done_one = True |
|
647 | done_one = True | |
659 | p.text(')') |
|
648 | p.text(')') | |
660 |
|
649 | |||
661 |
|
650 | |||
662 | def _type_pprint(obj, p, cycle): |
|
651 | def _type_pprint(obj, p, cycle): | |
663 | """The pprint for classes and types.""" |
|
652 | """The pprint for classes and types.""" | |
664 | # Heap allocated types might not have the module attribute, |
|
653 | # Heap allocated types might not have the module attribute, | |
665 | # and others may set it to None. |
|
654 | # and others may set it to None. | |
666 |
|
655 | |||
667 | # Checks for a __repr__ override in the metaclass. Can't compare the |
|
656 | # Checks for a __repr__ override in the metaclass. Can't compare the | |
668 | # type(obj).__repr__ directly because in PyPy the representation function |
|
657 | # type(obj).__repr__ directly because in PyPy the representation function | |
669 | # inherited from type isn't the same type.__repr__ |
|
658 | # inherited from type isn't the same type.__repr__ | |
670 | if [m for m in _get_mro(type(obj)) if "__repr__" in vars(m)][:1] != [type]: |
|
659 | if [m for m in _get_mro(type(obj)) if "__repr__" in vars(m)][:1] != [type]: | |
671 | _repr_pprint(obj, p, cycle) |
|
660 | _repr_pprint(obj, p, cycle) | |
672 | return |
|
661 | return | |
673 |
|
662 | |||
674 | mod = _safe_getattr(obj, '__module__', None) |
|
663 | mod = _safe_getattr(obj, '__module__', None) | |
675 | try: |
|
664 | try: | |
676 | name = obj.__qualname__ |
|
665 | name = obj.__qualname__ | |
677 | if not isinstance(name, str): |
|
666 | if not isinstance(name, str): | |
678 | # This can happen if the type implements __qualname__ as a property |
|
667 | # This can happen if the type implements __qualname__ as a property | |
679 | # or other descriptor in Python 2. |
|
668 | # or other descriptor in Python 2. | |
680 | raise Exception("Try __name__") |
|
669 | raise Exception("Try __name__") | |
681 | except Exception: |
|
670 | except Exception: | |
682 | name = obj.__name__ |
|
671 | name = obj.__name__ | |
683 | if not isinstance(name, str): |
|
672 | if not isinstance(name, str): | |
684 | name = '<unknown type>' |
|
673 | name = '<unknown type>' | |
685 |
|
674 | |||
686 | if mod in (None, '__builtin__', 'builtins', 'exceptions'): |
|
675 | if mod in (None, '__builtin__', 'builtins', 'exceptions'): | |
687 | p.text(name) |
|
676 | p.text(name) | |
688 | else: |
|
677 | else: | |
689 | p.text(mod + '.' + name) |
|
678 | p.text(mod + '.' + name) | |
690 |
|
679 | |||
691 |
|
680 | |||
692 | def _repr_pprint(obj, p, cycle): |
|
681 | def _repr_pprint(obj, p, cycle): | |
693 | """A pprint that just redirects to the normal repr function.""" |
|
682 | """A pprint that just redirects to the normal repr function.""" | |
694 | # Find newlines and replace them with p.break_() |
|
683 | # Find newlines and replace them with p.break_() | |
695 | output = repr(obj) |
|
684 | output = repr(obj) | |
696 | lines = output.splitlines() |
|
685 | lines = output.splitlines() | |
697 | with p.group(): |
|
686 | with p.group(): | |
698 | for idx, output_line in enumerate(lines): |
|
687 | for idx, output_line in enumerate(lines): | |
699 | if idx: |
|
688 | if idx: | |
700 | p.break_() |
|
689 | p.break_() | |
701 | p.text(output_line) |
|
690 | p.text(output_line) | |
702 |
|
691 | |||
703 |
|
692 | |||
704 | def _function_pprint(obj, p, cycle): |
|
693 | def _function_pprint(obj, p, cycle): | |
705 | """Base pprint for all functions and builtin functions.""" |
|
694 | """Base pprint for all functions and builtin functions.""" | |
706 | name = _safe_getattr(obj, '__qualname__', obj.__name__) |
|
695 | name = _safe_getattr(obj, '__qualname__', obj.__name__) | |
707 | mod = obj.__module__ |
|
696 | mod = obj.__module__ | |
708 | if mod and mod not in ('__builtin__', 'builtins', 'exceptions'): |
|
697 | if mod and mod not in ('__builtin__', 'builtins', 'exceptions'): | |
709 | name = mod + '.' + name |
|
698 | name = mod + '.' + name | |
710 | try: |
|
699 | try: | |
711 | func_def = name + str(signature(obj)) |
|
700 | func_def = name + str(signature(obj)) | |
712 | except ValueError: |
|
701 | except ValueError: | |
713 | func_def = name |
|
702 | func_def = name | |
714 | p.text('<function %s>' % func_def) |
|
703 | p.text('<function %s>' % func_def) | |
715 |
|
704 | |||
716 |
|
705 | |||
717 | def _exception_pprint(obj, p, cycle): |
|
706 | def _exception_pprint(obj, p, cycle): | |
718 | """Base pprint for all exceptions.""" |
|
707 | """Base pprint for all exceptions.""" | |
719 | name = getattr(obj.__class__, '__qualname__', obj.__class__.__name__) |
|
708 | name = getattr(obj.__class__, '__qualname__', obj.__class__.__name__) | |
720 | if obj.__class__.__module__ not in ('exceptions', 'builtins'): |
|
709 | if obj.__class__.__module__ not in ('exceptions', 'builtins'): | |
721 | name = '%s.%s' % (obj.__class__.__module__, name) |
|
710 | name = '%s.%s' % (obj.__class__.__module__, name) | |
722 | step = len(name) + 1 |
|
711 | step = len(name) + 1 | |
723 | p.begin_group(step, name + '(') |
|
712 | p.begin_group(step, name + '(') | |
724 | for idx, arg in enumerate(getattr(obj, 'args', ())): |
|
713 | for idx, arg in enumerate(getattr(obj, 'args', ())): | |
725 | if idx: |
|
714 | if idx: | |
726 | p.text(',') |
|
715 | p.text(',') | |
727 | p.breakable() |
|
716 | p.breakable() | |
728 | p.pretty(arg) |
|
717 | p.pretty(arg) | |
729 | p.end_group(step, ')') |
|
718 | p.end_group(step, ')') | |
730 |
|
719 | |||
731 |
|
720 | |||
732 | #: the exception base |
|
721 | #: the exception base | |
733 | try: |
|
722 | try: | |
734 | _exception_base = BaseException |
|
723 | _exception_base = BaseException | |
735 | except NameError: |
|
724 | except NameError: | |
736 | _exception_base = Exception |
|
725 | _exception_base = Exception | |
737 |
|
726 | |||
738 |
|
727 | |||
739 | #: printers for builtin types |
|
728 | #: printers for builtin types | |
740 | _type_pprinters = { |
|
729 | _type_pprinters = { | |
741 | int: _repr_pprint, |
|
730 | int: _repr_pprint, | |
742 | float: _repr_pprint, |
|
731 | float: _repr_pprint, | |
743 | str: _repr_pprint, |
|
732 | str: _repr_pprint, | |
744 | tuple: _seq_pprinter_factory('(', ')'), |
|
733 | tuple: _seq_pprinter_factory('(', ')'), | |
745 | list: _seq_pprinter_factory('[', ']'), |
|
734 | list: _seq_pprinter_factory('[', ']'), | |
746 | dict: _dict_pprinter_factory('{', '}'), |
|
735 | dict: _dict_pprinter_factory('{', '}'), | |
747 | set: _set_pprinter_factory('{', '}'), |
|
736 | set: _set_pprinter_factory('{', '}'), | |
748 | frozenset: _set_pprinter_factory('frozenset({', '})'), |
|
737 | frozenset: _set_pprinter_factory('frozenset({', '})'), | |
749 | super: _super_pprint, |
|
738 | super: _super_pprint, | |
750 | _re_pattern_type: _re_pattern_pprint, |
|
739 | _re_pattern_type: _re_pattern_pprint, | |
751 | type: _type_pprint, |
|
740 | type: _type_pprint, | |
752 | types.FunctionType: _function_pprint, |
|
741 | types.FunctionType: _function_pprint, | |
753 | types.BuiltinFunctionType: _function_pprint, |
|
742 | types.BuiltinFunctionType: _function_pprint, | |
754 | types.MethodType: _repr_pprint, |
|
743 | types.MethodType: _repr_pprint, | |
755 | datetime.datetime: _repr_pprint, |
|
744 | datetime.datetime: _repr_pprint, | |
756 | datetime.timedelta: _repr_pprint, |
|
745 | datetime.timedelta: _repr_pprint, | |
757 | _exception_base: _exception_pprint |
|
746 | _exception_base: _exception_pprint | |
758 | } |
|
747 | } | |
759 |
|
748 | |||
760 | # render os.environ like a dict |
|
749 | # render os.environ like a dict | |
761 | _env_type = type(os.environ) |
|
750 | _env_type = type(os.environ) | |
762 | # future-proof in case os.environ becomes a plain dict? |
|
751 | # future-proof in case os.environ becomes a plain dict? | |
763 | if _env_type is not dict: |
|
752 | if _env_type is not dict: | |
764 | _type_pprinters[_env_type] = _dict_pprinter_factory('environ{', '}') |
|
753 | _type_pprinters[_env_type] = _dict_pprinter_factory('environ{', '}') | |
765 |
|
754 | |||
766 | try: |
|
755 | try: | |
767 | # In PyPy, types.DictProxyType is dict, setting the dictproxy printer |
|
756 | # In PyPy, types.DictProxyType is dict, setting the dictproxy printer | |
768 | # using dict.setdefault avoids overwriting the dict printer |
|
757 | # using dict.setdefault avoids overwriting the dict printer | |
769 | _type_pprinters.setdefault(types.DictProxyType, |
|
758 | _type_pprinters.setdefault(types.DictProxyType, | |
770 | _dict_pprinter_factory('dict_proxy({', '})')) |
|
759 | _dict_pprinter_factory('dict_proxy({', '})')) | |
771 | _type_pprinters[types.ClassType] = _type_pprint |
|
760 | _type_pprinters[types.ClassType] = _type_pprint | |
772 | _type_pprinters[types.SliceType] = _repr_pprint |
|
761 | _type_pprinters[types.SliceType] = _repr_pprint | |
773 | except AttributeError: # Python 3 |
|
762 | except AttributeError: # Python 3 | |
774 | _type_pprinters[types.MappingProxyType] = \ |
|
763 | _type_pprinters[types.MappingProxyType] = \ | |
775 | _dict_pprinter_factory('mappingproxy({', '})') |
|
764 | _dict_pprinter_factory('mappingproxy({', '})') | |
776 | _type_pprinters[slice] = _repr_pprint |
|
765 | _type_pprinters[slice] = _repr_pprint | |
777 |
|
766 | |||
778 | try: |
|
767 | try: | |
779 | _type_pprinters[long] = _repr_pprint |
|
768 | _type_pprinters[long] = _repr_pprint | |
780 | _type_pprinters[unicode] = _repr_pprint |
|
769 | _type_pprinters[unicode] = _repr_pprint | |
781 | except NameError: |
|
770 | except NameError: | |
782 | _type_pprinters[range] = _repr_pprint |
|
771 | _type_pprinters[range] = _repr_pprint | |
783 | _type_pprinters[bytes] = _repr_pprint |
|
772 | _type_pprinters[bytes] = _repr_pprint | |
784 |
|
773 | |||
785 | #: printers for types specified by name |
|
774 | #: printers for types specified by name | |
786 | _deferred_type_pprinters = { |
|
775 | _deferred_type_pprinters = { | |
787 | } |
|
776 | } | |
788 |
|
777 | |||
789 | def for_type(typ, func): |
|
778 | def for_type(typ, func): | |
790 | """ |
|
779 | """ | |
791 | Add a pretty printer for a given type. |
|
780 | Add a pretty printer for a given type. | |
792 | """ |
|
781 | """ | |
793 | oldfunc = _type_pprinters.get(typ, None) |
|
782 | oldfunc = _type_pprinters.get(typ, None) | |
794 | if func is not None: |
|
783 | if func is not None: | |
795 | # To support easy restoration of old pprinters, we need to ignore Nones. |
|
784 | # To support easy restoration of old pprinters, we need to ignore Nones. | |
796 | _type_pprinters[typ] = func |
|
785 | _type_pprinters[typ] = func | |
797 | return oldfunc |
|
786 | return oldfunc | |
798 |
|
787 | |||
799 | def for_type_by_name(type_module, type_name, func): |
|
788 | def for_type_by_name(type_module, type_name, func): | |
800 | """ |
|
789 | """ | |
801 | Add a pretty printer for a type specified by the module and name of a type |
|
790 | Add a pretty printer for a type specified by the module and name of a type | |
802 | rather than the type object itself. |
|
791 | rather than the type object itself. | |
803 | """ |
|
792 | """ | |
804 | key = (type_module, type_name) |
|
793 | key = (type_module, type_name) | |
805 | oldfunc = _deferred_type_pprinters.get(key, None) |
|
794 | oldfunc = _deferred_type_pprinters.get(key, None) | |
806 | if func is not None: |
|
795 | if func is not None: | |
807 | # To support easy restoration of old pprinters, we need to ignore Nones. |
|
796 | # To support easy restoration of old pprinters, we need to ignore Nones. | |
808 | _deferred_type_pprinters[key] = func |
|
797 | _deferred_type_pprinters[key] = func | |
809 | return oldfunc |
|
798 | return oldfunc | |
810 |
|
799 | |||
811 |
|
800 | |||
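Reviewer note (not part of the diff): `for_type` and `for_type_by_name` above are the public registration hooks (available as `IPython.lib.pretty.for_type` / `for_type_by_name` outside this module). A hedged sketch, using `fractions.Fraction` and `float` purely as example targets:

    def _fraction_pprint(obj, p, cycle):
        p.text('Fraction(%d, %d)' % (obj.numerator, obj.denominator))

    # Deferred registration by (module, name): nothing is imported until
    # _in_deferred_types() meets an instance of fractions.Fraction.
    for_type_by_name('fractions', 'Fraction', _fraction_pprint)

    # Direct registration returns the printer it replaced, so the default
    # behaviour can be restored later (None arguments are ignored).
    old = for_type(float, lambda obj, p, cycle: p.text('%.3f' % obj))
    for_type(float, old)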
812 | #: printers for the default singletons |
|
801 | #: printers for the default singletons | |
813 | _singleton_pprinters = dict.fromkeys(map(id, [None, True, False, Ellipsis, |
|
802 | _singleton_pprinters = dict.fromkeys(map(id, [None, True, False, Ellipsis, | |
814 | NotImplemented]), _repr_pprint) |
|
803 | NotImplemented]), _repr_pprint) | |
815 |
|
804 | |||
816 |
|
805 | |||
817 | def _defaultdict_pprint(obj, p, cycle): |
|
806 | def _defaultdict_pprint(obj, p, cycle): | |
818 | name = obj.__class__.__name__ |
|
807 | name = obj.__class__.__name__ | |
819 | with p.group(len(name) + 1, name + '(', ')'): |
|
808 | with p.group(len(name) + 1, name + '(', ')'): | |
820 | if cycle: |
|
809 | if cycle: | |
821 | p.text('...') |
|
810 | p.text('...') | |
822 | else: |
|
811 | else: | |
823 | p.pretty(obj.default_factory) |
|
812 | p.pretty(obj.default_factory) | |
824 | p.text(',') |
|
813 | p.text(',') | |
825 | p.breakable() |
|
814 | p.breakable() | |
826 | p.pretty(dict(obj)) |
|
815 | p.pretty(dict(obj)) | |
827 |
|
816 | |||
828 | def _ordereddict_pprint(obj, p, cycle): |
|
817 | def _ordereddict_pprint(obj, p, cycle): | |
829 | name = obj.__class__.__name__ |
|
818 | name = obj.__class__.__name__ | |
830 | with p.group(len(name) + 1, name + '(', ')'): |
|
819 | with p.group(len(name) + 1, name + '(', ')'): | |
831 | if cycle: |
|
820 | if cycle: | |
832 | p.text('...') |
|
821 | p.text('...') | |
833 | elif len(obj): |
|
822 | elif len(obj): | |
834 | p.pretty(list(obj.items())) |
|
823 | p.pretty(list(obj.items())) | |
835 |
|
824 | |||
836 | def _deque_pprint(obj, p, cycle): |
|
825 | def _deque_pprint(obj, p, cycle): | |
837 | name = obj.__class__.__name__ |
|
826 | name = obj.__class__.__name__ | |
838 | with p.group(len(name) + 1, name + '(', ')'): |
|
827 | with p.group(len(name) + 1, name + '(', ')'): | |
839 | if cycle: |
|
828 | if cycle: | |
840 | p.text('...') |
|
829 | p.text('...') | |
841 | else: |
|
830 | else: | |
842 | p.pretty(list(obj)) |
|
831 | p.pretty(list(obj)) | |
843 |
|
832 | |||
844 |
|
833 | |||
845 | def _counter_pprint(obj, p, cycle): |
|
834 | def _counter_pprint(obj, p, cycle): | |
846 | name = obj.__class__.__name__ |
|
835 | name = obj.__class__.__name__ | |
847 | with p.group(len(name) + 1, name + '(', ')'): |
|
836 | with p.group(len(name) + 1, name + '(', ')'): | |
848 | if cycle: |
|
837 | if cycle: | |
849 | p.text('...') |
|
838 | p.text('...') | |
850 | elif len(obj): |
|
839 | elif len(obj): | |
851 | p.pretty(dict(obj)) |
|
840 | p.pretty(dict(obj)) | |
852 |
|
841 | |||
853 | for_type_by_name('collections', 'defaultdict', _defaultdict_pprint) |
|
842 | for_type_by_name('collections', 'defaultdict', _defaultdict_pprint) | |
854 | for_type_by_name('collections', 'OrderedDict', _ordereddict_pprint) |
|
843 | for_type_by_name('collections', 'OrderedDict', _ordereddict_pprint) | |
855 | for_type_by_name('collections', 'deque', _deque_pprint) |
|
844 | for_type_by_name('collections', 'deque', _deque_pprint) | |
856 | for_type_by_name('collections', 'Counter', _counter_pprint) |
|
845 | for_type_by_name('collections', 'Counter', _counter_pprint) | |
857 |
|
846 | |||
858 | if __name__ == '__main__': |
|
847 | if __name__ == '__main__': | |
859 | from random import randrange |
|
848 | from random import randrange | |
860 | class Foo(object): |
|
849 | class Foo(object): | |
861 | def __init__(self): |
|
850 | def __init__(self): | |
862 | self.foo = 1 |
|
851 | self.foo = 1 | |
863 | self.bar = re.compile(r'\s+') |
|
852 | self.bar = re.compile(r'\s+') | |
864 | self.blub = dict.fromkeys(range(30), randrange(1, 40)) |
|
853 | self.blub = dict.fromkeys(range(30), randrange(1, 40)) | |
865 | self.hehe = 23424.234234 |
|
854 | self.hehe = 23424.234234 | |
866 | self.list = ["blub", "blah", self] |
|
855 | self.list = ["blub", "blah", self] | |
867 |
|
856 | |||
868 | def get_foo(self): |
|
857 | def get_foo(self): | |
869 | print("foo") |
|
858 | print("foo") | |
870 |
|
859 | |||
871 | pprint(Foo(), verbose=True) |
|
860 | pprint(Foo(), verbose=True) |
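This closes the pretty-printer file. For reference, a short usage sketch of the module-level `pretty` and `pprint` helpers (the demo above uses `pprint`), assuming the file is `IPython.lib.pretty` (the hunk header does not show the path):

    from IPython.lib.pretty import pretty, pprint

    data = {'values': list(range(15)), 'nested': {'point': (1,)}}
    print(pretty(data, max_width=40))   # formatted repr returned as a string
    pprint(data, max_width=40)          # same output, written to sys.stdout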
@@ -1,71 +1,71 b'' | |||||
1 | # coding: utf-8 |
|
1 | # coding: utf-8 | |
2 | """ |
|
2 | """ | |
3 | Utilities for dealing with text encodings |
|
3 | Utilities for dealing with text encodings | |
4 | """ |
|
4 | """ | |
5 |
|
5 | |||
6 | #----------------------------------------------------------------------------- |
|
6 | #----------------------------------------------------------------------------- | |
7 | # Copyright (C) 2008-2012 The IPython Development Team |
|
7 | # Copyright (C) 2008-2012 The IPython Development Team | |
8 | # |
|
8 | # | |
9 | # Distributed under the terms of the BSD License. The full license is in |
|
9 | # Distributed under the terms of the BSD License. The full license is in | |
10 | # the file COPYING, distributed as part of this software. |
|
10 | # the file COPYING, distributed as part of this software. | |
11 | #----------------------------------------------------------------------------- |
|
11 | #----------------------------------------------------------------------------- | |
12 |
|
12 | |||
13 | #----------------------------------------------------------------------------- |
|
13 | #----------------------------------------------------------------------------- | |
14 | # Imports |
|
14 | # Imports | |
15 | #----------------------------------------------------------------------------- |
|
15 | #----------------------------------------------------------------------------- | |
16 | import sys |
|
16 | import sys | |
17 | import locale |
|
17 | import locale | |
18 | import warnings |
|
18 | import warnings | |
19 |
|
19 | |||
20 | # to deal with the possibility of sys.std* not being a stream at all |
|
20 | # to deal with the possibility of sys.std* not being a stream at all | |
21 | def get_stream_enc(stream, default=None): |
|
21 | def get_stream_enc(stream, default=None): | |
22 | """Return the given stream's encoding or a default. |
|
22 | """Return the given stream's encoding or a default. | |
23 |
|
23 | |||
24 | There are cases where ``sys.std*`` might not actually be a stream, so |
|
24 | There are cases where ``sys.std*`` might not actually be a stream, so | |
25 | check for the encoding attribute prior to returning it, and return |
|
25 | check for the encoding attribute prior to returning it, and return | |
26 | a default if it doesn't exist or evaluates as False. ``default`` |
|
26 | a default if it doesn't exist or evaluates as False. ``default`` | |
27 | is None if not provided. |
|
27 | is None if not provided. | |
28 | """ |
|
28 | """ | |
29 | if not hasattr(stream, 'encoding') or not stream.encoding: |
|
29 | if not hasattr(stream, 'encoding') or not stream.encoding: | |
30 | return default |
|
30 | return default | |
31 | else: |
|
31 | else: | |
32 | return stream.encoding |
|
32 | return stream.encoding | |
33 |
|
33 | |||
34 | # Less conservative replacement for sys.getdefaultencoding, that will try |
|
34 | # Less conservative replacement for sys.getdefaultencoding, that will try | |
35 | # to match the environment. |
|
35 | # to match the environment. | |
36 | # Defined here as central function, so if we find better choices, we |
|
36 | # Defined here as central function, so if we find better choices, we | |
37 | # won't need to make changes all over IPython. |
|
37 | # won't need to make changes all over IPython. | |
38 | def getdefaultencoding(prefer_stream=True): |
|
38 | def getdefaultencoding(prefer_stream=True): | |
39 | """Return IPython's guess for the default encoding for bytes as text. |
|
39 | """Return IPython's guess for the default encoding for bytes as text. | |
40 |
|
40 | |||
41 | If prefer_stream is True (default), asks for stdin.encoding first, |
|
41 | If prefer_stream is True (default), asks for stdin.encoding first, | |
42 | to match the calling Terminal, but that is often None for subprocesses. |
|
42 | to match the calling Terminal, but that is often None for subprocesses. | |
43 |
|
43 | |||
44 | Then fall back on locale.getpreferredencoding(), |
|
44 | Then fall back on locale.getpreferredencoding(), | |
45 | which should be a sensible platform default (that respects LANG environment), |
|
45 | which should be a sensible platform default (that respects LANG environment), | |
46 | and finally to sys.getdefaultencoding() which is the most conservative option, |
|
46 | and finally to sys.getdefaultencoding() which is the most conservative option, | |
47 | and usually |
47 | and usually UTF8 as of Python 3. | |
48 | """ |
|
48 | """ | |
49 | enc = None |
|
49 | enc = None | |
50 | if prefer_stream: |
|
50 | if prefer_stream: | |
51 | enc = get_stream_enc(sys.stdin) |
|
51 | enc = get_stream_enc(sys.stdin) | |
52 | if not enc or enc=='ascii': |
|
52 | if not enc or enc=='ascii': | |
53 | try: |
|
53 | try: | |
54 | # There are reports of getpreferredencoding raising errors |
|
54 | # There are reports of getpreferredencoding raising errors | |
55 | # in some cases, which may well be fixed, but let's be conservative here. |
|
55 | # in some cases, which may well be fixed, but let's be conservative here. | |
56 | enc = locale.getpreferredencoding() |
|
56 | enc = locale.getpreferredencoding() | |
57 | except Exception: |
|
57 | except Exception: | |
58 | pass |
|
58 | pass | |
59 | enc = enc or sys.getdefaultencoding() |
|
59 | enc = enc or sys.getdefaultencoding() | |
60 | # On windows `cp0` can be returned to indicate that there is no code page. |
|
60 | # On windows `cp0` can be returned to indicate that there is no code page. | |
61 | # Since cp0 is an invalid encoding return instead cp1252 which is the |
|
61 | # Since cp0 is an invalid encoding return instead cp1252 which is the | |
62 | # Western European default. |
|
62 | # Western European default. | |
63 | if enc == 'cp0': |
|
63 | if enc == 'cp0': | |
64 | warnings.warn( |
|
64 | warnings.warn( | |
65 | "Invalid code page cp0 detected - using cp1252 instead." |
|
65 | "Invalid code page cp0 detected - using cp1252 instead." | |
66 | "If cp1252 is incorrect please ensure a valid code page " |
|
66 | "If cp1252 is incorrect please ensure a valid code page " | |
67 | "is defined for the process.", RuntimeWarning) |
|
67 | "is defined for the process.", RuntimeWarning) | |
68 | return 'cp1252' |
|
68 | return 'cp1252' | |
69 | return enc |
|
69 | return enc | |
70 |
|
70 | |||
71 | DEFAULT_ENCODING = getdefaultencoding() |
|
71 | DEFAULT_ENCODING = getdefaultencoding() |
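A hedged usage sketch for the helpers in this file, assuming the module path is `IPython.utils.encoding` (the hunk header does not show the filename):

    import sys
    from IPython.utils.encoding import DEFAULT_ENCODING, get_stream_enc

    # Encoding of the attached terminal if stdout is a real stream, else a fallback.
    enc = get_stream_enc(sys.stdout, default='utf-8')

    # Decode captured subprocess output with IPython's best guess rather than ASCII.
    raw = b'caf\xc3\xa9\n'
    text = raw.decode(DEFAULT_ENCODING, errors='replace')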
@@ -1,105 +1,103 b'' | |||||
1 | """ |
|
1 | """ | |
2 | Tools to open .py files as Unicode, using the encoding specified within the file, |
|
2 | Tools to open .py files as Unicode, using the encoding specified within the file, | |
3 | as per PEP 263. |
|
3 | as per PEP 263. | |
4 |
|
4 | |||
5 | Much of the code is taken from the tokenize module in Python 3.2. |
|
5 | Much of the code is taken from the tokenize module in Python 3.2. | |
6 | """ |
|
6 | """ | |
7 |
|
7 | |||
8 | import io |
|
8 | import io | |
9 | from io import TextIOWrapper, BytesIO |
|
9 | from io import TextIOWrapper, BytesIO | |
10 | import re |
|
10 | import re | |
11 | from tokenize import open, detect_encoding |
|
11 | from tokenize import open, detect_encoding | |
12 |
|
12 | |||
13 | cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)", re.UNICODE) |
|
13 | cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)", re.UNICODE) | |
14 | cookie_comment_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE) |
|
14 | cookie_comment_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE) | |
15 |
|
15 | |||
16 | def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True): |
|
16 | def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True): | |
17 | """Converts a bytes string with python source code to unicode. |
|
17 | """Converts a bytes string with python source code to unicode. | |
18 |
|
18 | |||
19 | Unicode strings are passed through unchanged. Byte strings are checked |
|
19 | Unicode strings are passed through unchanged. Byte strings are checked | |
20 | for the python source file encoding cookie to determine encoding. |
|
20 | for the python source file encoding cookie to determine encoding. | |
21 | txt can be either a bytes buffer or a string containing the source |
|
21 | txt can be either a bytes buffer or a string containing the source | |
22 | code. |
|
22 | code. | |
23 | """ |
|
23 | """ | |
24 | if isinstance(txt, str): |
|
24 | if isinstance(txt, str): | |
25 | return txt |
|
25 | return txt | |
26 | if isinstance(txt, bytes): |
|
26 | if isinstance(txt, bytes): | |
27 | buffer = BytesIO(txt) |
|
27 | buffer = BytesIO(txt) | |
28 | else: |
|
28 | else: | |
29 | buffer = txt |
|
29 | buffer = txt | |
30 | try: |
|
30 | try: | |
31 | encoding, _ = detect_encoding(buffer.readline) |
|
31 | encoding, _ = detect_encoding(buffer.readline) | |
32 | except SyntaxError: |
|
32 | except SyntaxError: | |
33 | encoding = "ascii" |
|
33 | encoding = "ascii" | |
34 | buffer.seek(0) |
|
34 | buffer.seek(0) | |
35 | with TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True) as text: |
|
35 | with TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True) as text: | |
36 | text.mode = 'r' |
|
36 | text.mode = 'r' | |
37 | if skip_encoding_cookie: |
|
37 | if skip_encoding_cookie: | |
38 | return u"".join(strip_encoding_cookie(text)) |
|
38 | return u"".join(strip_encoding_cookie(text)) | |
39 | else: |
|
39 | else: | |
40 | return text.read() |
|
40 | return text.read() | |
41 |
|
41 | |||
42 | def strip_encoding_cookie(filelike): |
|
42 | def strip_encoding_cookie(filelike): | |
43 | """Generator to pull lines from a text-mode file, skipping the encoding |
|
43 | """Generator to pull lines from a text-mode file, skipping the encoding | |
44 | cookie if it is found in the first two lines. |
|
44 | cookie if it is found in the first two lines. | |
45 | """ |
|
45 | """ | |
46 | it = iter(filelike) |
|
46 | it = iter(filelike) | |
47 | try: |
|
47 | try: | |
48 | first = next(it) |
|
48 | first = next(it) | |
49 | if not cookie_comment_re.match(first): |
|
49 | if not cookie_comment_re.match(first): | |
50 | yield first |
|
50 | yield first | |
51 | second = next(it) |
|
51 | second = next(it) | |
52 | if not cookie_comment_re.match(second): |
|
52 | if not cookie_comment_re.match(second): | |
53 | yield second |
|
53 | yield second | |
54 | except StopIteration: |
|
54 | except StopIteration: | |
55 | return |
|
55 | return | |
56 |
|
56 | |||
57 | for line in it: |
|
57 | for line in it: | |
58 | yield line |
|
58 | yield line | |
59 |
|
59 | |||
60 | def read_py_file(filename, skip_encoding_cookie=True): |
|
60 | def read_py_file(filename, skip_encoding_cookie=True): | |
61 | """Read a Python file, using the encoding declared inside the file. |
|
61 | """Read a Python file, using the encoding declared inside the file. | |
62 |
|
62 | |||
63 | Parameters |
|
63 | Parameters | |
64 | ---------- |
|
64 | ---------- | |
65 | filename : str |
|
65 | filename : str | |
66 | The path to the file to read. |
|
66 | The path to the file to read. | |
67 | skip_encoding_cookie : bool |
|
67 | skip_encoding_cookie : bool | |
68 | If True (the default), and the encoding declaration is found in the first |
|
68 | If True (the default), and the encoding declaration is found in the first | |
69 | two lines, that line will be excluded from the output |
69 | two lines, that line will be excluded from the output. | |
70 | unicode string with an encoding declaration is a SyntaxError in Python 2. |
|
|||
71 |
|
70 | |||
72 | Returns |
|
71 | Returns | |
73 | ------- |
|
72 | ------- | |
74 | A unicode string containing the contents of the file. |
|
73 | A unicode string containing the contents of the file. | |
75 | """ |
|
74 | """ | |
76 | with open(filename) as f: # the open function defined in this module. |
|
75 | with open(filename) as f: # the open function defined in this module. | |
77 | if skip_encoding_cookie: |
|
76 | if skip_encoding_cookie: | |
78 | return "".join(strip_encoding_cookie(f)) |
|
77 | return "".join(strip_encoding_cookie(f)) | |
79 | else: |
|
78 | else: | |
80 | return f.read() |
|
79 | return f.read() | |
81 |
|
80 | |||
82 | def read_py_url(url, errors='replace', skip_encoding_cookie=True): |
|
81 | def read_py_url(url, errors='replace', skip_encoding_cookie=True): | |
83 | """Read a Python file from a URL, using the encoding declared inside the file. |
|
82 | """Read a Python file from a URL, using the encoding declared inside the file. | |
84 |
|
83 | |||
85 | Parameters |
|
84 | Parameters | |
86 | ---------- |
|
85 | ---------- | |
87 | url : str |
|
86 | url : str | |
88 | The URL from which to fetch the file. |
|
87 | The URL from which to fetch the file. | |
89 | errors : str |
|
88 | errors : str | |
90 | How to handle decoding errors in the file. Options are the same as for |
|
89 | How to handle decoding errors in the file. Options are the same as for | |
91 | bytes.decode(), but here 'replace' is the default. |
|
90 | bytes.decode(), but here 'replace' is the default. | |
92 | skip_encoding_cookie : bool |
|
91 | skip_encoding_cookie : bool | |
93 | If True (the default), and the encoding declaration is found in the first |
|
92 | If True (the default), and the encoding declaration is found in the first | |
94 | two lines, that line will be excluded from the output |
93 | two lines, that line will be excluded from the output. | |
95 | unicode string with an encoding declaration is a SyntaxError in Python 2. |
|
|||
96 |
|
94 | |||
97 | Returns |
|
95 | Returns | |
98 | ------- |
|
96 | ------- | |
99 | A unicode string containing the contents of the file. |
|
97 | A unicode string containing the contents of the file. | |
100 | """ |
|
98 | """ | |
101 | # Deferred import for faster start |
|
99 | # Deferred import for faster start | |
102 | from urllib.request import urlopen |
|
100 | from urllib.request import urlopen | |
103 | response = urlopen(url) |
|
101 | response = urlopen(url) | |
104 | buffer = io.BytesIO(response.read()) |
|
102 | buffer = io.BytesIO(response.read()) | |
105 | return source_to_unicode(buffer, errors, skip_encoding_cookie) |
|
103 | return source_to_unicode(buffer, errors, skip_encoding_cookie) |
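A hedged usage sketch for the two public entry points above, assuming the module path is `IPython.utils.openpy` (not shown in the hunk header); `example_module.py` is a placeholder filename:

    from IPython.utils.openpy import read_py_file, source_to_unicode

    # Read a file from disk using its declared source encoding,
    # dropping the coding cookie line from the result.
    src = read_py_file('example_module.py', skip_encoding_cookie=True)

    # Decode an in-memory bytes buffer the same way.
    raw = b"# -*- coding: latin-1 -*-\nname = 'caf\xe9'\n"
    print(source_to_unicode(raw))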